2026-03-10T06:53:42.518 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-10T06:53:42.522 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T06:53:42.539 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937
branch: squid
description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic}
email: null
first_in_suite: false
flavor: default
job_id: '937'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 1
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: false
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_STRAY_DAEMON
    - CEPHADM_FAILED_DAEMON
    - CEPHADM_AGENT_DOWN
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
seed: 8043
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
targets:
  vm02.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKxnYM0110buuXn53r4F3H56tmvvXpXuBPJCD6Oueaw7P5npsxUhO4eNz0kTScd88M47tSQk9Z6KDTFePoBhtFs=
  vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIC87zMMNok5+Hk2egu0c9v19wRi5F3aVuDo0WCOPo7ZD1uK3aff4pRhVlzauAOhaBaRRhwXIYXLYiVvki30zNM=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
- cephadm.shell:
    mon.a:
    - ceph config set mgr mgr/cephadm/use_repo_digest false --force
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - radosgw-admin realm create --rgw-realm=r --default
    - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
    - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
    - radosgw-admin period update --rgw-realm=r --commit
    - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000
    - ceph orch apply rgw smpl
    - ceph osd pool create foo
    - rbd pool init foo
    - ceph orch apply iscsi foo u p
    - sleep 120
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
    - ceph config set global log_to_journald false --force
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
    - ceph orch ps
    - ceph versions
    - echo "wait for servicemap items w/ changing names to refresh"
    - sleep 60
    - ceph orch ps
    - ceph versions
    - ceph orch upgrade status
    - ceph health detail
    - ceph versions | jq -e '.overall | length == 1'
    - ceph versions | jq -e '.overall | keys' | grep $sha1
    - ceph orch ls | grep '^osd '
- cephadm.shell:
    mon.a:
    - ceph orch upgrade ls
    - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
    - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_01:00:38
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-10T06:53:42.539 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it
2026-03-10T06:53:42.540 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks
2026-03-10T06:53:42.540 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-10T06:53:42.540 INFO:teuthology.task.internal:Checking packages...
2026-03-10T06:53:42.540 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-10T06:53:42.540 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-10T06:53:42.540 INFO:teuthology.packaging:ref: None
2026-03-10T06:53:42.540 INFO:teuthology.packaging:tag: None
2026-03-10T06:53:42.540 INFO:teuthology.packaging:branch: squid
2026-03-10T06:53:42.540 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T06:53:42.540 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-10T06:53:43.254 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-10T06:53:43.255 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-10T06:53:43.256 INFO:teuthology.task.internal:no buildpackages task found
2026-03-10T06:53:43.256 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-10T06:53:43.256 INFO:teuthology.task.internal:Saving configuration
2026-03-10T06:53:43.260 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-10T06:53:43.261 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-10T06:53:43.267 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm02.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 06:52:34.949924', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:02', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKxnYM0110buuXn53r4F3H56tmvvXpXuBPJCD6Oueaw7P5npsxUhO4eNz0kTScd88M47tSQk9Z6KDTFePoBhtFs='}
2026-03-10T06:53:43.272 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm05.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 06:52:34.949522', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:05', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIC87zMMNok5+Hk2egu0c9v19wRi5F3aVuDo0WCOPo7ZD1uK3aff4pRhVlzauAOhaBaRRhwXIYXLYiVvki30zNM='}
2026-03-10T06:53:43.272 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-10T06:53:43.273 INFO:teuthology.task.internal:roles: ubuntu@vm02.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a']
2026-03-10T06:53:43.273 INFO:teuthology.task.internal:roles: ubuntu@vm05.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b']
2026-03-10T06:53:43.273 INFO:teuthology.run_tasks:Running task console_log...
2026-03-10T06:53:43.279 DEBUG:teuthology.task.console_log:vm02 does not support IPMI; excluding
2026-03-10T06:53:43.284 DEBUG:teuthology.task.console_log:vm05 does not support IPMI; excluding
2026-03-10T06:53:43.284 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f1cb2d72170>, signals=[15])
2026-03-10T06:53:43.284 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-10T06:53:43.285 INFO:teuthology.task.internal:Opening connections...
2026-03-10T06:53:43.285 DEBUG:teuthology.task.internal:connecting to ubuntu@vm02.local
2026-03-10T06:53:43.285 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm02.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T06:53:43.344 DEBUG:teuthology.task.internal:connecting to ubuntu@vm05.local
2026-03-10T06:53:43.344 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T06:53:43.401 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-10T06:53:43.402 DEBUG:teuthology.orchestra.run.vm02:> uname -m
2026-03-10T06:53:43.443 INFO:teuthology.orchestra.run.vm02.stdout:x86_64
2026-03-10T06:53:43.443 DEBUG:teuthology.orchestra.run.vm02:> cat /etc/os-release
2026-03-10T06:53:43.497 INFO:teuthology.orchestra.run.vm02.stdout:NAME="CentOS Stream"
2026-03-10T06:53:43.497 INFO:teuthology.orchestra.run.vm02.stdout:VERSION="9"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:ID="centos"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:ID_LIKE="rhel fedora"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:VERSION_ID="9"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:PLATFORM_ID="platform:el9"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:ANSI_COLOR="0;31"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:LOGO="fedora-logo-icon"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:HOME_URL="https://centos.org/"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T06:53:43.498 INFO:teuthology.orchestra.run.vm02.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T06:53:43.498 INFO:teuthology.lock.ops:Updating vm02.local on lock server
2026-03-10T06:53:43.502 DEBUG:teuthology.orchestra.run.vm05:> uname -m
2026-03-10T06:53:43.516 INFO:teuthology.orchestra.run.vm05.stdout:x86_64
2026-03-10T06:53:43.517 DEBUG:teuthology.orchestra.run.vm05:> cat /etc/os-release
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:NAME="CentOS Stream"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:VERSION="9"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:ID="centos"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:ID_LIKE="rhel fedora"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:VERSION_ID="9"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:PLATFORM_ID="platform:el9"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:ANSI_COLOR="0;31"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:LOGO="fedora-logo-icon"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:HOME_URL="https://centos.org/"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-10T06:53:43.570 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-10T06:53:43.570 INFO:teuthology.lock.ops:Updating vm05.local on lock server
2026-03-10T06:53:43.578 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-10T06:53:43.580 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-10T06:53:43.581 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-10T06:53:43.581 DEBUG:teuthology.orchestra.run.vm02:> test '!' -e /home/ubuntu/cephtest
2026-03-10T06:53:43.583 DEBUG:teuthology.orchestra.run.vm05:> test '!' -e /home/ubuntu/cephtest
2026-03-10T06:53:43.624 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-10T06:53:43.625 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-10T06:53:43.625 DEBUG:teuthology.orchestra.run.vm02:> test -z $(ls -A /var/lib/ceph)
2026-03-10T06:53:43.637 DEBUG:teuthology.orchestra.run.vm05:> test -z $(ls -A /var/lib/ceph)
2026-03-10T06:53:43.649 INFO:teuthology.orchestra.run.vm02.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T06:53:43.678 INFO:teuthology.orchestra.run.vm05.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T06:53:43.679 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-10T06:53:43.686 DEBUG:teuthology.orchestra.run.vm02:> test -e /ceph-qa-ready
2026-03-10T06:53:43.702 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T06:53:43.887 DEBUG:teuthology.orchestra.run.vm05:> test -e /ceph-qa-ready
2026-03-10T06:53:43.901 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T06:53:44.070 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-10T06:53:44.072 INFO:teuthology.task.internal:Creating test directory...
2026-03-10T06:53:44.072 DEBUG:teuthology.orchestra.run.vm02:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T06:53:44.073 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T06:53:44.086 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-10T06:53:44.088 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-10T06:53:44.089 INFO:teuthology.task.internal:Creating archive directory...
2026-03-10T06:53:44.089 DEBUG:teuthology.orchestra.run.vm02:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T06:53:44.127 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T06:53:44.144 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-10T06:53:44.145 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-10T06:53:44.145 DEBUG:teuthology.orchestra.run.vm02:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T06:53:44.194 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T06:53:44.194 DEBUG:teuthology.orchestra.run.vm05:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T06:53:44.207 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T06:53:44.207 DEBUG:teuthology.orchestra.run.vm02:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T06:53:44.236 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T06:53:44.256 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T06:53:44.265 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T06:53:44.271 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T06:53:44.280 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T06:53:44.281 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-10T06:53:44.283 INFO:teuthology.task.internal:Configuring sudo...
2026-03-10T06:53:44.283 DEBUG:teuthology.orchestra.run.vm02:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T06:53:44.309 DEBUG:teuthology.orchestra.run.vm05:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T06:53:44.345 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-10T06:53:44.347 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-03-10T06:53:44.347 DEBUG:teuthology.orchestra.run.vm02:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T06:53:44.372 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T06:53:44.399 DEBUG:teuthology.orchestra.run.vm02:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T06:53:44.447 DEBUG:teuthology.orchestra.run.vm02:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T06:53:44.505 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T06:53:44.505 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T06:53:44.562 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T06:53:44.584 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T06:53:44.640 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-10T06:53:44.640 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T06:53:44.697 DEBUG:teuthology.orchestra.run.vm02:> sudo service rsyslog restart
2026-03-10T06:53:44.698 DEBUG:teuthology.orchestra.run.vm05:> sudo service rsyslog restart
2026-03-10T06:53:44.722 INFO:teuthology.orchestra.run.vm02.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T06:53:44.764 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-10T06:53:45.034 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-10T06:53:45.036 INFO:teuthology.task.internal:Starting timer...
2026-03-10T06:53:45.036 INFO:teuthology.run_tasks:Running task pcp...
2026-03-10T06:53:45.039 INFO:teuthology.run_tasks:Running task selinux...
2026-03-10T06:53:45.042 INFO:teuthology.task.selinux:Excluding vm02: VMs are not yet supported
2026-03-10T06:53:45.042 INFO:teuthology.task.selinux:Excluding vm05: VMs are not yet supported
2026-03-10T06:53:45.042 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-10T06:53:45.042 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-10T06:53:45.042 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-10T06:53:45.042 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-10T06:53:45.043 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-10T06:53:45.044 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-10T06:53:45.045 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-10T06:53:45.508 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-10T06:53:45.513 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-10T06:53:45.513 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventorya6azsamj --limit vm02.local,vm05.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-10T06:55:43.620 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm02.local'), Remote(name='ubuntu@vm05.local')]
2026-03-10T06:55:43.620 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm02.local'
2026-03-10T06:55:43.621 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm02.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T06:55:43.682 DEBUG:teuthology.orchestra.run.vm02:> true
2026-03-10T06:55:43.756 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm02.local'
2026-03-10T06:55:43.756 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm05.local'
2026-03-10T06:55:43.757 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T06:55:43.820 DEBUG:teuthology.orchestra.run.vm05:> true
2026-03-10T06:55:43.894 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm05.local'
2026-03-10T06:55:43.894 INFO:teuthology.run_tasks:Running task clock...
2026-03-10T06:55:43.897 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-10T06:55:43.897 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T06:55:43.897 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T06:55:43.898 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T06:55:43.898 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T06:55:43.933 INFO:teuthology.orchestra.run.vm02.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T06:55:43.948 INFO:teuthology.orchestra.run.vm02.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T06:55:43.975 INFO:teuthology.orchestra.run.vm02.stderr:sudo: ntpd: command not found
2026-03-10T06:55:43.975 INFO:teuthology.orchestra.run.vm05.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-10T06:55:43.991 INFO:teuthology.orchestra.run.vm02.stdout:506 Cannot talk to daemon
2026-03-10T06:55:43.993 INFO:teuthology.orchestra.run.vm05.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-10T06:55:44.008 INFO:teuthology.orchestra.run.vm02.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T06:55:44.023 INFO:teuthology.orchestra.run.vm05.stderr:sudo: ntpd: command not found
2026-03-10T06:55:44.024 INFO:teuthology.orchestra.run.vm02.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T06:55:44.038 INFO:teuthology.orchestra.run.vm05.stdout:506 Cannot talk to daemon
2026-03-10T06:55:44.055 INFO:teuthology.orchestra.run.vm05.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-10T06:55:44.073 INFO:teuthology.orchestra.run.vm05.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-10T06:55:44.074 INFO:teuthology.orchestra.run.vm02.stderr:bash: line 1: ntpq: command not found
2026-03-10T06:55:44.076 INFO:teuthology.orchestra.run.vm02.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T06:55:44.076 INFO:teuthology.orchestra.run.vm02.stdout:===============================================================================
2026-03-10T06:55:44.120 INFO:teuthology.orchestra.run.vm05.stderr:bash: line 1: ntpq: command not found
2026-03-10T06:55:44.122 INFO:teuthology.orchestra.run.vm05.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-10T06:55:44.122 INFO:teuthology.orchestra.run.vm05.stdout:===============================================================================
2026-03-10T06:55:44.123 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-10T06:55:44.170 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': False}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON', 'CEPHADM_AGENT_DOWN'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'}
2026-03-10T06:55:44.171 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0
2026-03-10T06:55:44.171 INFO:tasks.cephadm:Cluster fsid is 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:55:44.171 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-10T06:55:44.171 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.102', 'mon.c': '[v2:192.168.123.102:3301,v1:192.168.123.102:6790]', 'mon.b': '192.168.123.105'}
2026-03-10T06:55:44.171 INFO:tasks.cephadm:First mon is mon.a on vm02
2026-03-10T06:55:44.171 INFO:tasks.cephadm:First mgr is y
2026-03-10T06:55:44.171 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-10T06:55:44.171 DEBUG:teuthology.orchestra.run.vm02:> sudo hostname $(hostname -s)
2026-03-10T06:55:44.201 DEBUG:teuthology.orchestra.run.vm05:> sudo hostname $(hostname -s)
2026-03-10T06:55:44.235 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)...
2026-03-10T06:55:44.235 DEBUG:teuthology.orchestra.run.vm02:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T06:55:44.479 INFO:teuthology.orchestra.run.vm02.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 06:55 /home/ubuntu/cephtest/cephadm
2026-03-10T06:55:44.479 DEBUG:teuthology.orchestra.run.vm05:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T06:55:44.560 INFO:teuthology.orchestra.run.vm05.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 10 06:55 /home/ubuntu/cephtest/cephadm
2026-03-10T06:55:44.560 DEBUG:teuthology.orchestra.run.vm02:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T06:55:44.584 DEBUG:teuthology.orchestra.run.vm05:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T06:55:44.605 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts...
2026-03-10T06:55:44.605 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-10T06:55:44.626 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull
2026-03-10T06:55:44.837 INFO:teuthology.orchestra.run.vm02.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T06:55:44.860 INFO:teuthology.orchestra.run.vm05.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout: "repo_digests": [
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout: ]
2026-03-10T06:56:06.409 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)",
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9",
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout: "repo_digests": [
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a",
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667"
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout: ]
2026-03-10T06:56:07.929 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T06:56:07.948 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /etc/ceph
2026-03-10T06:56:07.994 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /etc/ceph
2026-03-10T06:56:08.023 DEBUG:teuthology.orchestra.run.vm02:> sudo chmod 777 /etc/ceph
2026-03-10T06:56:08.056 DEBUG:teuthology.orchestra.run.vm05:> sudo chmod 777 /etc/ceph
2026-03-10T06:56:08.088 INFO:tasks.cephadm:Writing seed config...
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = False
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-10T06:56:08.089 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-10T06:56:08.089 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T06:56:08.089 DEBUG:teuthology.orchestra.run.vm02:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-10T06:56:08.114 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 28bd35e6-1c4e-11f1-9057-21b3549603fc
mon election default strategy = 1

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = False

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-10T06:56:08.115 DEBUG:teuthology.orchestra.run.vm02:mon.a> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service
2026-03-10T06:56:08.157 DEBUG:teuthology.orchestra.run.vm02:mgr.y> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service
2026-03-10T06:56:08.200 INFO:tasks.cephadm:Bootstrapping...
2026-03-10T06:56:08.201 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.102 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-10T06:56:08.388 INFO:teuthology.orchestra.run.vm02.stderr:--------------------------------------------------------------------------------
2026-03-10T06:56:08.388 INFO:teuthology.orchestra.run.vm02.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', '28bd35e6-1c4e-11f1-9057-21b3549603fc', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'y', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.102', '--skip-admin-label']
2026-03-10T06:56:08.413 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: 5.8.0
2026-03-10T06:56:08.416 INFO:teuthology.orchestra.run.vm02.stderr:Verifying podman|docker is present...
2026-03-10T06:56:08.452 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: 5.8.0
2026-03-10T06:56:08.455 INFO:teuthology.orchestra.run.vm02.stderr:Verifying lvm2 is present...
2026-03-10T06:56:08.455 INFO:teuthology.orchestra.run.vm02.stderr:Verifying time synchronization is in place...
2026-03-10T06:56:08.465 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T06:56:08.475 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: inactive
2026-03-10T06:56:08.482 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: enabled
2026-03-10T06:56:08.489 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: active
2026-03-10T06:56:08.489 INFO:teuthology.orchestra.run.vm02.stderr:Unit chronyd.service is enabled and running
2026-03-10T06:56:08.489 INFO:teuthology.orchestra.run.vm02.stderr:Repeating the final host check...
2026-03-10T06:56:08.511 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: 5.8.0
2026-03-10T06:56:08.514 INFO:teuthology.orchestra.run.vm02.stderr:podman (/bin/podman) version 5.8.0 is present
2026-03-10T06:56:08.514 INFO:teuthology.orchestra.run.vm02.stderr:systemctl is present
2026-03-10T06:56:08.514 INFO:teuthology.orchestra.run.vm02.stderr:lvcreate is present
2026-03-10T06:56:08.522 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T06:56:08.532 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: inactive
2026-03-10T06:56:08.542 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: enabled
2026-03-10T06:56:08.551 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: active
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Unit chronyd.service is enabled and running
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Host looks OK
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Cluster fsid: 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Acquiring lock 139639965875264 on /run/cephadm/28bd35e6-1c4e-11f1-9057-21b3549603fc.lock
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Lock 139639965875264 acquired on /run/cephadm/28bd35e6-1c4e-11f1-9057-21b3549603fc.lock
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Verifying IP 192.168.123.102 port 3300 ...
2026-03-10T06:56:08.552 INFO:teuthology.orchestra.run.vm02.stderr:Verifying IP 192.168.123.102 port 6789 ...
2026-03-10T06:56:08.553 INFO:teuthology.orchestra.run.vm02.stderr:Base mon IP is 192.168.123.102, final addrv is [v2:192.168.123.102:3300,v1:192.168.123.102:6789]
2026-03-10T06:56:08.556 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.102 metric 100
2026-03-10T06:56:08.556 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.102 metric 100
2026-03-10T06:56:08.559 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium
2026-03-10T06:56:08.559 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-10T06:56:08.562 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-10T06:56:08.562 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: inet6 ::1/128 scope host
2026-03-10T06:56:08.562 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-10T06:56:08.562 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000
2026-03-10T06:56:08.562 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:2/64 scope link noprefixroute
2026-03-10T06:56:08.562 INFO:teuthology.orchestra.run.vm02.stderr:/sbin/ip: valid_lft forever preferred_lft forever
2026-03-10T06:56:08.563 INFO:teuthology.orchestra.run.vm02.stderr:Mon IP `192.168.123.102` is in CIDR network `192.168.123.0/24`
2026-03-10T06:56:08.563 INFO:teuthology.orchestra.run.vm02.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-10T06:56:08.563 INFO:teuthology.orchestra.run.vm02.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0...
2026-03-10T06:56:08.587 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0...
2026-03-10T06:56:09.789 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Getting image source signatures
2026-03-10T06:56:09.789 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a
2026-03-10T06:56:09.789 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b
2026-03-10T06:56:09.789 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c
2026-03-10T06:56:09.790 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513
2026-03-10T06:56:09.790 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2
2026-03-10T06:56:09.790 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-10T06:56:09.831 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: Writing manifest to image destination
2026-03-10T06:56:09.836 INFO:teuthology.orchestra.run.vm02.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9
2026-03-10T06:56:10.017 INFO:teuthology.orchestra.run.vm02.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-10T06:56:10.082 INFO:teuthology.orchestra.run.vm02.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)
2026-03-10T06:56:10.082 INFO:teuthology.orchestra.run.vm02.stderr:Extracting ceph user uid/gid from container image...
2026-03-10T06:56:10.171 INFO:teuthology.orchestra.run.vm02.stderr:stat: 167 167
2026-03-10T06:56:10.196 INFO:teuthology.orchestra.run.vm02.stderr:Creating initial keys...
2026-03-10T06:56:10.293 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph-authtool: AQCKwK9pkjd1ERAA/5opVyBOjFBB076uSnXVLA==
2026-03-10T06:56:10.414 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph-authtool: AQCKwK9p3jupGBAAF8E5TftXn26jzaX4jRNyaw==
2026-03-10T06:56:10.517 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph-authtool: AQCKwK9p3SHbHhAAYYN582s0EtDu4yKIzLP3rA==
2026-03-10T06:56:10.545 INFO:teuthology.orchestra.run.vm02.stderr:Creating initial monmap...
2026-03-10T06:56:10.631 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T06:56:10.632 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus
2026-03-10T06:56:10.632 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:56:10.632 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T06:56:10.659 INFO:teuthology.orchestra.run.vm02.stderr:monmaptool for a [v2:192.168.123.102:3300,v1:192.168.123.102:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T06:56:10.659 INFO:teuthology.orchestra.run.vm02.stderr:setting min_mon_release = octopus
2026-03-10T06:56:10.659 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/monmaptool: set fsid to 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:56:10.659 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T06:56:10.659 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:10.659 INFO:teuthology.orchestra.run.vm02.stderr:Creating mon...
2026-03-10T06:56:10.818 INFO:teuthology.orchestra.run.vm02.stderr:create mon.a on
2026-03-10T06:56:11.028 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-10T06:56:11.200 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc.target → /etc/systemd/system/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc.target.
2026-03-10T06:56:11.200 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc.target → /etc/systemd/system/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc.target.
2026-03-10T06:56:11.499 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Failed to reset failed state of unit ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service: Unit ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service not loaded.
2026-03-10T06:56:11.509 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Created symlink /etc/systemd/system/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc.target.wants/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service → /etc/systemd/system/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@.service.
2026-03-10T06:56:12.090 INFO:teuthology.orchestra.run.vm02.stderr:firewalld does not appear to be present
2026-03-10T06:56:12.090 INFO:teuthology.orchestra.run.vm02.stderr:Not possible to enable service . firewalld.service is not available
2026-03-10T06:56:12.090 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for mon to start...
2026-03-10T06:56:12.090 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for mon...
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: cluster:
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: id: 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: health: HEALTH_OK
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: services:
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon: 1 daemons, quorum a (age 0.193446s)
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mgr: no daemons active
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: data:
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: objects: 0 objects, 0 B
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: pgs:
2026-03-10T06:56:12.317 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:12.999 INFO:teuthology.orchestra.run.vm02.stderr:mon is available
2026-03-10T06:56:12.999 INFO:teuthology.orchestra.run.vm02.stderr:Assimilating anything we can from ceph.conf...
2026-03-10T06:56:13.202 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: [global]
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: fsid = 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.102:3300,v1:192.168.123.102:6789]
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: [mgr]
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mgr/cephadm/use_agent = False
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: [osd]
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_map_max_advance = 10
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000
2026-03-10T06:56:13.203 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_sloppy_crc = true
2026-03-10T06:56:13.230 INFO:teuthology.orchestra.run.vm02.stderr:Generating new minimal ceph.conf...
2026-03-10T06:56:13.480 INFO:teuthology.orchestra.run.vm02.stderr:Restarting the monitor...
2026-03-10T06:56:13.811 INFO:teuthology.orchestra.run.vm02.stderr:Setting mon public_network to 192.168.123.0/24
2026-03-10T06:56:14.025 INFO:teuthology.orchestra.run.vm02.stderr:Wrote config to /etc/ceph/ceph.conf
2026-03-10T06:56:14.025 INFO:teuthology.orchestra.run.vm02.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-10T06:56:14.025 INFO:teuthology.orchestra.run.vm02.stderr:Creating mgr...
2026-03-10T06:56:14.025 INFO:teuthology.orchestra.run.vm02.stderr:Verifying port 9283 ...
2026-03-10T06:56:14.207 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Failed to reset failed state of unit ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service: Unit ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service not loaded.
2026-03-10T06:56:14.214 INFO:teuthology.orchestra.run.vm02.stderr:systemctl: Created symlink /etc/systemd/system/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc.target.wants/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service → /etc/systemd/system/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@.service.
2026-03-10T06:56:14.542 INFO:teuthology.orchestra.run.vm02.stderr:firewalld does not appear to be present
2026-03-10T06:56:14.542 INFO:teuthology.orchestra.run.vm02.stderr:Not possible to enable service . firewalld.service is not available
2026-03-10T06:56:14.542 INFO:teuthology.orchestra.run.vm02.stderr:firewalld does not appear to be present
2026-03-10T06:56:14.542 INFO:teuthology.orchestra.run.vm02.stderr:Not possible to open ports <[9283]>. firewalld.service is not available
2026-03-10T06:56:14.542 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for mgr to start...
2026-03-10T06:56:14.542 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for mgr...
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: {
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "fsid": "28bd35e6-1c4e-11f1-9057-21b3549603fc",
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "health": {
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "status": "HEALTH_OK",
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "checks": {},
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "mutes": []
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "election_epoch": 5,
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "quorum": [
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: 0
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ],
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "quorum_names": [
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "a"
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ],
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "quorum_age": 0,
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "monmap": {
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy",
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_mons": 1
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "osdmap": {
2026-03-10T06:56:14.782 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_osds": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_up_osds": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "osd_up_since": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_in_osds": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "osd_in_since": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_remapped_pgs": 0
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "pgmap": {
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "pgs_by_state": [],
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_pgs": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_pools": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_objects": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "data_bytes": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "bytes_used": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "bytes_avail": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "bytes_total": 0
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "fsmap": {
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "by_rank": [],
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "up:standby": 0
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "mgrmap": {
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "available": false,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_standbys": 0,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "modules": [
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "iostat",
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "nfs",
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "restful"
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ],
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "services": {}
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "servicemap": {
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "modified": "2026-03-10T06:56:12.128551+0000",
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "services": {}
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "progress_events": {}
2026-03-10T06:56:14.783 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: }
2026-03-10T06:56:14.850 INFO:teuthology.orchestra.run.vm02.stderr:mgr not available, waiting (1/15)...
2026-03-10T06:56:17.136 INFO:teuthology.orchestra.run.vm02.stderr:mgr not available, waiting (2/15)...
2026-03-10T06:56:19.390 INFO:teuthology.orchestra.run.vm02.stderr:mgr not available, waiting (3/15)...
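The bootstrap keeps re-running the status probe shown above until the mgr map reports an available manager, giving up after 15 attempts. A minimal Python sketch of an equivalent wait loop (an illustration, not the cephadm code itself), assuming the ceph CLI and an admin keyring are usable on the node; the 15-attempt / 2-second pacing mirrors this log rather than any documented default:

    import json
    import subprocess
    import time

    def wait_for_mgr(attempts=15, delay=2.0):
        # Poll 'ceph status -f json' until mgrmap.available is true.
        for i in range(1, attempts + 1):
            out = subprocess.run(["ceph", "status", "-f", "json"],
                                 capture_output=True, text=True, check=True).stdout
            if json.loads(out).get("mgrmap", {}).get("available"):
                print("mgr is available")
                return
            print(f"mgr not available, waiting ({i}/{attempts})...")
            time.sleep(delay)
        raise RuntimeError("mgr never became available")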
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: {
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "fsid": "28bd35e6-1c4e-11f1-9057-21b3549603fc",
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "health": {
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "status": "HEALTH_OK",
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "checks": {},
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "mutes": []
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "election_epoch": 5,
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "quorum": [
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: 0
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ],
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "quorum_names": [
2026-03-10T06:56:21.680 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "a"
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ],
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "quorum_age": 7,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "monmap": {
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy",
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_mons": 1
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "osdmap": {
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_osds": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_up_osds": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "osd_up_since": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_in_osds": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "osd_in_since": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_remapped_pgs": 0
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "pgmap": {
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "pgs_by_state": [],
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_pgs": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_pools": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_objects": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "data_bytes": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "bytes_used": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "bytes_avail": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "bytes_total": 0
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "fsmap": {
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "by_rank": [],
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "up:standby": 0
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "mgrmap": {
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "available": true,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_standbys": 0,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "modules": [
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "iostat",
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "nfs",
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "restful"
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ],
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "services": {}
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "servicemap": {
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 1,
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "modified": "2026-03-10T06:56:12.128551+0000",
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "services": {}
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: },
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "progress_events": {}
2026-03-10T06:56:21.681 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: }
2026-03-10T06:56:21.722 INFO:teuthology.orchestra.run.vm02.stderr:mgr is available
2026-03-10T06:56:21.964 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:21.964 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: [global]
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: fsid = 28bd35e6-1c4e-11f1-9057-21b3549603fc
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: [mgr]
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph:
2026-03-10T06:56:21.965
INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: [osd] 2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-10T06:56:21.965 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-10T06:56:22.003 INFO:teuthology.orchestra.run.vm02.stderr:Enabling cephadm module... 2026-03-10T06:56:23.353 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: { 2026-03-10T06:56:23.353 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-10T06:56:23.353 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "available": true, 2026-03-10T06:56:23.353 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-10T06:56:23.353 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-10T06:56:23.353 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: } 2026-03-10T06:56:23.382 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for the mgr to restart... 2026-03-10T06:56:23.383 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for mgr epoch 5... 2026-03-10T06:56:28.545 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: { 2026-03-10T06:56:28.545 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-10T06:56:28.545 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "initialized": true 2026-03-10T06:56:28.545 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: } 2026-03-10T06:56:28.572 INFO:teuthology.orchestra.run.vm02.stderr:mgr epoch 5 is available 2026-03-10T06:56:28.572 INFO:teuthology.orchestra.run.vm02.stderr:Setting orchestrator backend to cephadm... 2026-03-10T06:56:29.121 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: value unchanged 2026-03-10T06:56:29.158 INFO:teuthology.orchestra.run.vm02.stderr:Generating ssh key... 2026-03-10T06:56:29.818 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDSi6i9PXkLbGW9FWaiF6Yp7W9EyifKjSz+6yQE7pjFYbxVMeI/1rYrNqSz1+bo5BKx2E88AkVvLoqDKnh2JuUPX44mGg9t2Uhl5/oxSqJWPb1fFXUsH7tdoinE9MNHW4fpDJDPIrG1j5hGG1z1hl2g5wVf7bo2wT5OEYDf0qtBj5cY5nw2c/+ZOzQ1l6/wjea5t5kYn13mk/trg8oHTys+Or7h/NMpx8hPHnjtgBAUt8P0Y0pvXrZJFuL9F7iVh37lGqjqWia05+9zC+OmqH4DAVwbms7XbMOzlbntexLQYz2Mn7+S6TlagwEMvPIV6+h1rgXcWFSGVDvwFc510JuqE5QLakg7onq2bIbL16/oh9kZ3xCHlCvAsnHRI2jyBmqaUFgE0745u82gZT23mdLfobkPyHDfoCZxHqmNzRw1rKUHTk0dXNtvcPTReVXEBUqBEf7tevam3IHk4PRgQXDwhKsBrbQSIb07J0tNy5WANGuEq3UcxmXm3GFU9bnmHbs= ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T06:56:29.854 INFO:teuthology.orchestra.run.vm02.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-10T06:56:29.854 INFO:teuthology.orchestra.run.vm02.stderr:Adding key to root@localhost authorized_keys... 2026-03-10T06:56:29.854 INFO:teuthology.orchestra.run.vm02.stderr:Adding host vm02... 2026-03-10T06:56:30.799 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: Added host 'vm02' with addr '192.168.123.102' 2026-03-10T06:56:30.869 INFO:teuthology.orchestra.run.vm02.stderr:Deploying unmanaged mon service... 2026-03-10T06:56:31.209 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-10T06:56:31.246 INFO:teuthology.orchestra.run.vm02.stderr:Deploying unmanaged mgr service... 2026-03-10T06:56:31.511 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: Scheduled mgr update... 
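The bootstrap above enables the cephadm mgr module, waits for the mgr to restart, and then sets the orchestrator backend to cephadm. A minimal sketch of the same two commands plus an illustrative final check, assuming the ceph CLI and an admin keyring; the ceph orch status call is only a convenient way to confirm the orchestrator answers:

    import subprocess

    def ceph(*args):
        # Run one ceph CLI command and return its stdout.
        return subprocess.run(["ceph", *args], capture_output=True,
                              text=True, check=True).stdout

    ceph("mgr", "module", "enable", "cephadm")  # mgr restarts to load the module
    ceph("orch", "set", "backend", "cephadm")   # may report "value unchanged"
    print(ceph("orch", "status"))               # confirm the orchestrator answers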
2026-03-10T06:56:32.132 INFO:teuthology.orchestra.run.vm02.stderr:Enabling the dashboard module... 2026-03-10T06:56:33.575 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: { 2026-03-10T06:56:33.576 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-10T06:56:33.576 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "available": true, 2026-03-10T06:56:33.576 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-10T06:56:33.576 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-10T06:56:33.576 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: } 2026-03-10T06:56:33.601 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for the mgr to restart... 2026-03-10T06:56:33.601 INFO:teuthology.orchestra.run.vm02.stderr:Waiting for mgr epoch 9... 2026-03-10T06:56:38.412 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: { 2026-03-10T06:56:38.412 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 2026-03-10T06:56:38.412 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: "initialized": true 2026-03-10T06:56:38.412 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: } 2026-03-10T06:56:38.464 INFO:teuthology.orchestra.run.vm02.stderr:mgr epoch 9 is available 2026-03-10T06:56:38.465 INFO:teuthology.orchestra.run.vm02.stderr:Generating a dashboard self-signed certificate... 2026-03-10T06:56:38.877 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: Self-signed certificate created 2026-03-10T06:56:38.932 INFO:teuthology.orchestra.run.vm02.stderr:Creating initial admin user... 2026-03-10T06:56:39.518 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$m5Fy1lnXRD48er3nU9q8reqBGqmqNMCfYx2w02S5QSUcAogCfoIcS", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773125799, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-10T06:56:39.564 INFO:teuthology.orchestra.run.vm02.stderr:Fetching dashboard port number... 2026-03-10T06:56:39.823 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: 8443 2026-03-10T06:56:39.854 INFO:teuthology.orchestra.run.vm02.stderr:firewalld does not appear to be present 2026-03-10T06:56:39.854 INFO:teuthology.orchestra.run.vm02.stderr:Not possible to open ports <[8443]>. 
firewalld.service is not available
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr:Ceph Dashboard is now available at:
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr: URL: https://vm02.local:8443/
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr: User: admin
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr: Password: bn7kdpqqjf
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:39.856 INFO:teuthology.orchestra.run.vm02.stderr:Enabling autotune for osd_memory_target
2026-03-10T06:56:40.416 INFO:teuthology.orchestra.run.vm02.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:You can access the Ceph CLI with:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:Please consider enabling telemetry to help improve Ceph:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr: ceph telemetry on
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:For more information see:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T06:56:40.480 INFO:teuthology.orchestra.run.vm02.stderr:Bootstrap complete.
2026-03-10T06:56:40.510 INFO:tasks.cephadm:Fetching config...
2026-03-10T06:56:40.510 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T06:56:40.510 DEBUG:teuthology.orchestra.run.vm02:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-10T06:56:40.557 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-10T06:56:40.557 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T06:56:40.557 DEBUG:teuthology.orchestra.run.vm02:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-10T06:56:40.576 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-10T06:56:40.577 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T06:56:40.577 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/keyring of=/dev/stdout
2026-03-10T06:56:40.666 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-10T06:56:40.666 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T06:56:40.666 DEBUG:teuthology.orchestra.run.vm02:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-10T06:56:40.735 INFO:tasks.cephadm:Installing pub ssh key for root users...
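The task then copies the bootstrap artifacts (ceph.conf, the admin and mon keyrings, and the cephadm SSH public key) off vm02 by running dd if=<path> of=/dev/stdout on the remote shell, as the DEBUG lines above show. A minimal sketch of that fetch pattern over plain ssh, assuming passwordless SSH to the node; the helper name is illustrative:

    import subprocess

    def fetch_remote_file(host, path, sudo=False):
        # Stream a remote file to stdout with dd and capture it locally.
        dd = f"dd if={path} of=/dev/stdout"
        cmd = ["ssh", host, ("sudo " if sudo else "") + dd]
        return subprocess.run(cmd, capture_output=True, check=True).stdout

    conf = fetch_remote_file("vm02", "/etc/ceph/ceph.conf")
    pub_key = fetch_remote_file("vm02", "/home/ubuntu/cephtest/ceph.pub")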
2026-03-10T06:56:40.736 DEBUG:teuthology.orchestra.run.vm02:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDSi6i9PXkLbGW9FWaiF6Yp7W9EyifKjSz+6yQE7pjFYbxVMeI/1rYrNqSz1+bo5BKx2E88AkVvLoqDKnh2JuUPX44mGg9t2Uhl5/oxSqJWPb1fFXUsH7tdoinE9MNHW4fpDJDPIrG1j5hGG1z1hl2g5wVf7bo2wT5OEYDf0qtBj5cY5nw2c/+ZOzQ1l6/wjea5t5kYn13mk/trg8oHTys+Or7h/NMpx8hPHnjtgBAUt8P0Y0pvXrZJFuL9F7iVh37lGqjqWia05+9zC+OmqH4DAVwbms7XbMOzlbntexLQYz2Mn7+S6TlagwEMvPIV6+h1rgXcWFSGVDvwFc510JuqE5QLakg7onq2bIbL16/oh9kZ3xCHlCvAsnHRI2jyBmqaUFgE0745u82gZT23mdLfobkPyHDfoCZxHqmNzRw1rKUHTk0dXNtvcPTReVXEBUqBEf7tevam3IHk4PRgQXDwhKsBrbQSIb07J0tNy5WANGuEq3UcxmXm3GFU9bnmHbs= ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T06:56:40.852 INFO:teuthology.orchestra.run.vm02.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDSi6i9PXkLbGW9FWaiF6Yp7W9EyifKjSz+6yQE7pjFYbxVMeI/1rYrNqSz1+bo5BKx2E88AkVvLoqDKnh2JuUPX44mGg9t2Uhl5/oxSqJWPb1fFXUsH7tdoinE9MNHW4fpDJDPIrG1j5hGG1z1hl2g5wVf7bo2wT5OEYDf0qtBj5cY5nw2c/+ZOzQ1l6/wjea5t5kYn13mk/trg8oHTys+Or7h/NMpx8hPHnjtgBAUt8P0Y0pvXrZJFuL9F7iVh37lGqjqWia05+9zC+OmqH4DAVwbms7XbMOzlbntexLQYz2Mn7+S6TlagwEMvPIV6+h1rgXcWFSGVDvwFc510JuqE5QLakg7onq2bIbL16/oh9kZ3xCHlCvAsnHRI2jyBmqaUFgE0745u82gZT23mdLfobkPyHDfoCZxHqmNzRw1rKUHTk0dXNtvcPTReVXEBUqBEf7tevam3IHk4PRgQXDwhKsBrbQSIb07J0tNy5WANGuEq3UcxmXm3GFU9bnmHbs= ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T06:56:40.875 DEBUG:teuthology.orchestra.run.vm05:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDSi6i9PXkLbGW9FWaiF6Yp7W9EyifKjSz+6yQE7pjFYbxVMeI/1rYrNqSz1+bo5BKx2E88AkVvLoqDKnh2JuUPX44mGg9t2Uhl5/oxSqJWPb1fFXUsH7tdoinE9MNHW4fpDJDPIrG1j5hGG1z1hl2g5wVf7bo2wT5OEYDf0qtBj5cY5nw2c/+ZOzQ1l6/wjea5t5kYn13mk/trg8oHTys+Or7h/NMpx8hPHnjtgBAUt8P0Y0pvXrZJFuL9F7iVh37lGqjqWia05+9zC+OmqH4DAVwbms7XbMOzlbntexLQYz2Mn7+S6TlagwEMvPIV6+h1rgXcWFSGVDvwFc510JuqE5QLakg7onq2bIbL16/oh9kZ3xCHlCvAsnHRI2jyBmqaUFgE0745u82gZT23mdLfobkPyHDfoCZxHqmNzRw1rKUHTk0dXNtvcPTReVXEBUqBEf7tevam3IHk4PRgQXDwhKsBrbQSIb07J0tNy5WANGuEq3UcxmXm3GFU9bnmHbs= ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-10T06:56:40.912 INFO:teuthology.orchestra.run.vm05.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDSi6i9PXkLbGW9FWaiF6Yp7W9EyifKjSz+6yQE7pjFYbxVMeI/1rYrNqSz1+bo5BKx2E88AkVvLoqDKnh2JuUPX44mGg9t2Uhl5/oxSqJWPb1fFXUsH7tdoinE9MNHW4fpDJDPIrG1j5hGG1z1hl2g5wVf7bo2wT5OEYDf0qtBj5cY5nw2c/+ZOzQ1l6/wjea5t5kYn13mk/trg8oHTys+Or7h/NMpx8hPHnjtgBAUt8P0Y0pvXrZJFuL9F7iVh37lGqjqWia05+9zC+OmqH4DAVwbms7XbMOzlbntexLQYz2Mn7+S6TlagwEMvPIV6+h1rgXcWFSGVDvwFc510JuqE5QLakg7onq2bIbL16/oh9kZ3xCHlCvAsnHRI2jyBmqaUFgE0745u82gZT23mdLfobkPyHDfoCZxHqmNzRw1rKUHTk0dXNtvcPTReVXEBUqBEf7tevam3IHk4PRgQXDwhKsBrbQSIb07J0tNy5WANGuEq3UcxmXm3GFU9bnmHbs= ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T06:56:40.923 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-10T06:56:41.650 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-10T06:56:41.650 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph 
orch client-keyring set client.admin '*' --mode 0755 2026-03-10T06:56:42.221 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm05 2026-03-10T06:56:42.221 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-10T06:56:42.221 DEBUG:teuthology.orchestra.run.vm05:> dd of=/etc/ceph/ceph.conf 2026-03-10T06:56:42.239 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-10T06:56:42.239 DEBUG:teuthology.orchestra.run.vm05:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:56:42.296 INFO:tasks.cephadm:Adding host vm05 to orchestrator... 2026-03-10T06:56:42.296 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch host add vm05 2026-03-10T06:56:43.585 INFO:teuthology.orchestra.run.vm02.stdout:Added host 'vm05' with addr '192.168.123.105' 2026-03-10T06:56:43.636 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch host ls --format=json 2026-03-10T06:56:44.162 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:56:44.162 INFO:teuthology.orchestra.run.vm02.stdout:[{"addr": "192.168.123.102", "hostname": "vm02", "labels": [], "status": ""}, {"addr": "192.168.123.105", "hostname": "vm05", "labels": [], "status": ""}] 2026-03-10T06:56:44.217 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-10T06:56:44.217 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd crush tunables default 2026-03-10T06:56:45.399 INFO:teuthology.orchestra.run.vm02.stderr:adjusted tunables profile to default 2026-03-10T06:56:45.449 INFO:tasks.cephadm:Adding mon.a on vm02 2026-03-10T06:56:45.449 INFO:tasks.cephadm:Adding mon.c on vm02 2026-03-10T06:56:45.449 INFO:tasks.cephadm:Adding mon.b on vm05 2026-03-10T06:56:45.449 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch apply mon '3;vm02:192.168.123.102=a;vm02:[v2:192.168.123.102:3301,v1:192.168.123.102:6790]=c;vm05:192.168.123.105=b' 2026-03-10T06:56:45.987 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled mon update... 2026-03-10T06:56:46.056 DEBUG:teuthology.orchestra.run.vm02:mon.c> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.c.service 2026-03-10T06:56:46.058 DEBUG:teuthology.orchestra.run.vm05:mon.b> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.b.service 2026-03-10T06:56:46.059 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
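To enroll the second node, the task writes the conf and admin keyring to vm05, runs ceph orch host add vm05, and confirms the result with ceph orch host ls --format=json, as above. A minimal verification sketch, assuming the ceph CLI on an admin node; the hostnames are the ones from this run:

    import json
    import subprocess

    def ceph_json(*args):
        # Run a ceph CLI command with JSON output and parse it.
        out = subprocess.run(["ceph", *args, "--format=json"],
                             capture_output=True, text=True, check=True).stdout
        return json.loads(out)

    subprocess.run(["ceph", "orch", "host", "add", "vm05"], check=True)
    hosts = {h["hostname"] for h in ceph_json("orch", "host", "ls")}
    assert {"vm02", "vm05"} <= hosts, f"unexpected host list: {hosts}"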
2026-03-10T06:56:46.060 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph mon dump -f json 2026-03-10T06:56:46.620 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:56:46.620 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":1,"fsid":"28bd35e6-1c4e-11f1-9057-21b3549603fc","modified":"2026-03-10T06:56:10.631622Z","created":"2026-03-10T06:56:10.631622Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T06:56:46.623 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 1 2026-03-10T06:56:47.667 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-10T06:56:47.667 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph mon dump -f json 2026-03-10T06:56:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:48 vm05 ceph-mon[48591]: mon.b@-1(synchronizing) e2 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file 2026-03-10T06:56:52.757 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:56:52.757 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":2,"fsid":"28bd35e6-1c4e-11f1-9057-21b3549603fc","modified":"2026-03-10T06:56:47.520552Z","created":"2026-03-10T06:56:10.631622Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3301","nonce":0},{"type":"v1","addr":"192.168.123.102:6790","nonce":0}]},"addr":"192.168.123.102:6790/0","public_addr":"192.168.123.102:6790/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T06:56:52.766 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 2 2026-03-10T06:56:53.838 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
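The mon spec applied above pins three monitors (a and c on vm02, b on vm05), so the task keeps polling ceph mon dump -f json until the monmap lists all three, as the repeated "Waiting for 3 mons in monmap..." entries show. A minimal sketch of that wait, assuming the ceph CLI; the timeout is illustrative:

    import json
    import subprocess
    import time

    def wait_for_mons(expected=3, delay=1.0, timeout=300):
        # Poll the monmap until it contains the expected number of mons.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.run(["ceph", "mon", "dump", "-f", "json"],
                                 capture_output=True, text=True, check=True).stdout
            mons = json.loads(out)["mons"]
            if len(mons) >= expected:
                return [m["name"] for m in mons]
            time.sleep(delay)
        raise TimeoutError(f"monmap never reached {expected} mons")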
2026-03-10T06:56:53.838 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph mon dump -f json 2026-03-10T06:56:58.752 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:56:58.752 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":3,"fsid":"28bd35e6-1c4e-11f1-9057-21b3549603fc","modified":"2026-03-10T06:56:52.872880Z","created":"2026-03-10T06:56:10.631622Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3301","nonce":0},{"type":"v1","addr":"192.168.123.102:6790","nonce":0}]},"addr":"192.168.123.102:6790/0","public_addr":"192.168.123.102:6790/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:3300","nonce":0},{"type":"v1","addr":"192.168.123.105:6789","nonce":0}]},"addr":"192.168.123.105:6789/0","public_addr":"192.168.123.105:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T06:56:58.755 INFO:teuthology.orchestra.run.vm05.stderr:dumped monmap epoch 3 2026-03-10T06:56:58.798 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-10T06:56:58.798 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph config generate-minimal-conf 2026-03-10T06:56:59.468 INFO:teuthology.orchestra.run.vm02.stdout:# minimal ceph.conf for 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T06:56:59.468 INFO:teuthology.orchestra.run.vm02.stdout:[global] 2026-03-10T06:56:59.468 INFO:teuthology.orchestra.run.vm02.stdout: fsid = 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T06:56:59.468 INFO:teuthology.orchestra.run.vm02.stdout: mon_host = [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] 2026-03-10T06:56:59.547 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 
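With all three mons in the map, cephadm emits a minimal ceph.conf (fsid plus mon_host) and the task pushes it, together with the admin keyring, to every node by piping the content into sudo dd of=<path>, as the DEBUG lines that follow show. A minimal push sketch, assuming passwordless SSH and sudo on the targets; the host list is the one from this run:

    import subprocess

    def push_remote_file(host, path, data: bytes):
        # Write a remote file by piping the payload into 'sudo dd of=<path>'.
        subprocess.run(["ssh", host, f"sudo dd of={path}"], input=data, check=True)

    minimal_conf = subprocess.run(["ceph", "config", "generate-minimal-conf"],
                                  capture_output=True, check=True).stdout
    for host in ("vm02", "vm05"):
        push_remote_file(host, "/etc/ceph/ceph.conf", minimal_conf)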
2026-03-10T06:56:59.547 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-10T06:56:59.547 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T06:56:59.578 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-10T06:56:59.579 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:56:59.647 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-10T06:56:59.647 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.conf 2026-03-10T06:56:59.674 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-10T06:56:59.674 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:56:59.737 INFO:tasks.cephadm:Adding mgr.y on vm02 2026-03-10T06:56:59.737 INFO:tasks.cephadm:Adding mgr.x on vm05 2026-03-10T06:56:59.737 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch apply mgr '2;vm02=y;vm05=x' 2026-03-10T06:56:59.941 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: Deploying daemon mon.b on vm05 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mon.a calling monitor election 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mon.c calling monitor election 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: monmap e2: 2 mons at {a=[v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0],c=[v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0]} 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: fsmap 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mgrmap e13: y(active, since 15s) 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: overall HEALTH_OK 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mon.c calling monitor election 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mon.a calling monitor election 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon 
metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: monmap e3: 3 mons at {a=[v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0],b=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0],c=[v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0]} 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: fsmap 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: mgrmap e13: y(active, since 20s) 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: overall HEALTH_OK 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: Reconfiguring mon.a (unknown last config time)... 
2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: Reconfiguring daemon mon.a on vm02 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/899955247' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T06:56:59.942 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:56:59.943 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T06:56:59.943 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T06:56:59.943 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:56:59.943 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:56:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:00.245 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled mgr update... 2026-03-10T06:57:00.312 DEBUG:teuthology.orchestra.run.vm05:mgr.x> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service 2026-03-10T06:57:00.313 INFO:tasks.cephadm:Deploying OSDs... 
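Before deploying OSDs, the task looks for a /scratch_devs list (absent here, hence the exit status 1), falls back to ls /dev/[sv]d?, drops the root device, and then checks each remaining disk with stat, a one-block dd read, and a mount scan, as the probing that follows shows. A minimal local sketch of that filter, assuming it runs directly on the target node with sudo; the root-device name is taken from this run:

    import glob
    import subprocess

    def usable_osd_devices(root_dev="/dev/vda"):
        # Return block devices that are readable and not mounted (root disk excluded).
        mounts = subprocess.run(["mount"], capture_output=True, text=True).stdout
        mounted = [line for line in mounts.splitlines() if "devtmpfs" not in line]
        devices = []
        for dev in sorted(glob.glob("/dev/[sv]d?")):
            if dev == root_dev:
                continue  # never reuse the root disk
            readable = subprocess.run(
                ["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"],
                capture_output=True).returncode == 0
            if readable and not any(dev in line for line in mounted):
                devices.append(dev)
        return devices

    print(usable_osd_devices())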
2026-03-10T06:57:00.313 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-10T06:57:00.313 DEBUG:teuthology.orchestra.run.vm02:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T06:57:00.333 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T06:57:00.333 DEBUG:teuthology.orchestra.run.vm02:> ls /dev/[sv]d? 2026-03-10T06:57:00.395 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vda 2026-03-10T06:57:00.395 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdb 2026-03-10T06:57:00.395 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdc 2026-03-10T06:57:00.395 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdd 2026-03-10T06:57:00.395 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vde 2026-03-10T06:57:00.395 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T06:57:00.395 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T06:57:00.395 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdb 2026-03-10T06:57:00.454 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdb 2026-03-10T06:57:00.454 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:00.454 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-10T06:57:00.455 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:00.455 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:00.455 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 06:56:41.139221754 +0000 2026-03-10T06:57:00.455 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 06:56:40.871221468 +0000 2026-03-10T06:57:00.455 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 06:56:40.871221468 +0000 2026-03-10T06:57:00.455 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-10 06:53:06.318000000 +0000 2026-03-10T06:57:00.455 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: mon.b calling monitor election 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: mon.b calling monitor election 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: mon.c calling monitor election 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: mon.a calling monitor election 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: monmap e3: 3 mons at {a=[v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0],b=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0],c=[v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0]} 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: fsmap 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: osdmap e4: 0 total, 0 up, 0 in 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: 
mgrmap e13: y(active, since 22s) 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: overall HEALTH_OK 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T06:57:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:00.523 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-10T06:57:00.523 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-10T06:57:00.523 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000170941 s, 3.0 MB/s 2026-03-10T06:57:00.523 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T06:57:00.586 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdc 2026-03-10T06:57:00.647 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdc 2026-03-10T06:57:00.647 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:00.647 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-10T06:57:00.648 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:00.648 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:00.648 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 06:56:41.225221845 +0000 2026-03-10T06:57:00.648 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 06:56:40.858221455 +0000 2026-03-10T06:57:00.648 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 06:56:40.858221455 +0000 2026-03-10T06:57:00.648 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-10 06:53:06.323000000 +0000 2026-03-10T06:57:00.648 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T06:57:00.722 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-10T06:57:00.722 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-10T06:57:00.722 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000125074 s, 4.1 MB/s 2026-03-10T06:57:00.722 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T06:57:00.782 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdd 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdd 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 06:56:41.304221929 +0000 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 06:56:40.874221472 +0000 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 06:56:40.874221472 +0000 2026-03-10T06:57:00.843 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-10 06:53:06.333000000 +0000 2026-03-10T06:57:00.843 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T06:57:00.912 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-10T06:57:00.912 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-10T06:57:00.912 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000158457 s, 3.2 MB/s 2026-03-10T06:57:00.913 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T06:57:00.975 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vde 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vde 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 06:56:41.419222052 +0000 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 06:56:40.855221451 +0000 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 06:56:40.855221451 +0000 2026-03-10T06:57:01.035 INFO:teuthology.orchestra.run.vm02.stdout: Birth: 2026-03-10 06:53:06.344000000 +0000 2026-03-10T06:57:01.035 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T06:57:01.103 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-10T06:57:01.104 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-10T06:57:01.104 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000145733 s, 3.5 MB/s 2026-03-10T06:57:01.104 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T06:57:01.135 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:01.135+0000 7f754e0fc000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T06:57:01.166 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-10T06:57:01.166 DEBUG:teuthology.orchestra.run.vm05:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T06:57:01.210 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T06:57:01.210 DEBUG:teuthology.orchestra.run.vm05:> ls /dev/[sv]d? 
2026-03-10T06:57:01.292 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vda 2026-03-10T06:57:01.292 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vdb 2026-03-10T06:57:01.292 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vdc 2026-03-10T06:57:01.292 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vdd 2026-03-10T06:57:01.292 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vde 2026-03-10T06:57:01.292 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T06:57:01.292 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T06:57:01.292 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vdb 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vdb 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,10 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-10 06:56:45.173374804 +0000 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-10 06:56:44.870374313 +0000 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-10 06:56:44.870374313 +0000 2026-03-10T06:57:01.367 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-10 06:52:41.222000000 +0000 2026-03-10T06:57:01.367 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T06:57:01.456 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-10T06:57:01.456 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-10T06:57:01.456 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000156644 s, 3.3 MB/s 2026-03-10T06:57:01.457 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-10T06:57:01.490 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vdc 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vdc 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 224 Links: 1 Device type: fc,20 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-10 06:56:45.248374926 +0000 2026-03-10T06:57:01.549 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-10 06:56:44.869374312 +0000 2026-03-10T06:57:01.550 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-10 06:56:44.869374312 +0000 2026-03-10T06:57:01.550 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-10 06:52:41.230000000 +0000 2026-03-10T06:57:01.550 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-10T06:57:01.623 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-10T06:57:01.623 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-10T06:57:01.623 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000259807 s, 2.0 MB/s 2026-03-10T06:57:01.624 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-10T06:57:01.684 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vdd 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vdd 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 225 Links: 1 Device type: fc,30 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-10 06:56:45.326375052 +0000 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-10 06:56:44.874374320 +0000 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-10 06:56:44.874374320 +0000 2026-03-10T06:57:01.749 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-10 06:52:41.234000000 +0000 2026-03-10T06:57:01.749 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='client.14208 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm02=y;vm05=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: Saving service mgr spec with placement vm02=y;vm05=x;count:2 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: Deploying daemon mgr.x on vm05 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: 
dispatch 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:01.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:01.564+0000 7f754e0fc000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T06:57:01.776 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-10T06:57:01.776 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-10T06:57:01.776 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000169778 s, 3.0 MB/s 2026-03-10T06:57:01.777 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-10T06:57:01.835 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vde 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vde 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 229 Links: 1 Device type: fc,40 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-10 06:56:45.405375180 +0000 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-10 06:56:44.874374320 +0000 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-10 06:56:44.874374320 +0000 2026-03-10T06:57:01.895 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-10 06:52:41.237000000 +0000 2026-03-10T06:57:01.895 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-10T06:57:01.968 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-10T06:57:01.969 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-10T06:57:01.969 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000167955 s, 3.0 MB/s 2026-03-10T06:57:01.970 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-10T06:57:02.031 INFO:tasks.cephadm:Deploying osd.0 on vm02 with /dev/vde... 
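Up to this point the cephadm task has only been qualifying disks: on each host it lists /dev/[sv]d?, drops the root device (/dev/vda), and requires every remaining disk to stat as a block special file, survive a one-sector read, and be absent from the mount table. A minimal shell sketch of that probe, assuming the same virtio naming as these VPS nodes (the grep-based root-disk exclusion only approximates what teuthology.misc derives from the mount table):

  # /scratch_devs is absent on these nodes (the dd above exits 1), so fall
  # back to globbing virtio/SCSI disks and dropping the root device.
  devs=$(ls /dev/[sv]d? | grep -v '^/dev/vda$')
  for dev in $devs; do
      stat "$dev"                                   # must exist as a block special file
      sudo dd if="$dev" of=/dev/null count=1        # must be readable (one sector)
      ! mount | grep -v devtmpfs | grep -q "$dev"   # must not be mounted anywhere
  done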
2026-03-10T06:57:02.031 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vde 2026-03-10T06:57:02.075 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:01.931+0000 7f754e0fc000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T06:57:02.076 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:02.075+0000 7f754e0fc000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T06:57:02.495 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:02.127+0000 7f754e0fc000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T06:57:02.495 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:02.294+0000 7f754e0fc000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T06:57:02.725 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:57:02.741 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm02:/dev/vde 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: Reconfiguring mgr.y (unknown last config time)... 
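Each OSD deployment that follows uses the same pair of cephadm invocations: zap the device through ceph-volume, then hand it to the orchestrator with ceph orch daemon add osd. A generalized sketch of that step, reusing the fsid, image, and keyring paths from this run (./cephadm stands for the copy staged at /home/ubuntu/cephtest/cephadm):

  FSID=28bd35e6-1c4e-11f1-9057-21b3549603fc
  IMAGE=quay.io/ceph/ceph:v17.2.0
  HOST=vm02; DEV=/dev/vde            # substitute the target host and device

  # wipe any previous LVM/partition state from the device
  sudo ./cephadm --image "$IMAGE" ceph-volume \
      -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid "$FSID" -- lvm zap "$DEV"

  # let the orchestrator create and start the OSD on that device
  sudo ./cephadm --image "$IMAGE" shell \
      -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
      --fsid "$FSID" -- ceph orch daemon add osd "$HOST:$DEV"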
2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: Reconfiguring daemon mgr.y on vm02 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:03.180 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:02.882+0000 7f754e0fc000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T06:57:03.180 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.067+0000 7f754e0fc000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T06:57:03.180 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.125+0000 7f754e0fc000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T06:57:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.179+0000 7f754e0fc000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T06:57:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.241+0000 7f754e0fc000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T06:57:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.300+0000 7f754e0fc000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T06:57:03.914 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.606+0000 7f754e0fc000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES 
member 2026-03-10T06:57:03.914 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:03.674+0000 7f754e0fc000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T06:57:04.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:03 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:04.246 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:03 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:04.246 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:03 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:04.246 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:03 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:04.246 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:03 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:04.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.245+0000 7f754e0fc000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T06:57:04.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.307+0000 7f754e0fc000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T06:57:04.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.370+0000 7f754e0fc000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T06:57:04.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.494+0000 7f754e0fc000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T06:57:04.916 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.553+0000 7f754e0fc000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T06:57:04.916 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.643+0000 7f754e0fc000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T06:57:04.916 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:04.727+0000 7f754e0fc000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: from='client.14220 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/957797956' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "35204e1a-6579-424a-9923-9986832c655c"}]: dispatch 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/957797956' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "35204e1a-6579-424a-9923-9986832c655c"}]': finished 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: osdmap e5: 1 total, 0 up, 1 in 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:04 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3344754119' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:05 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:05.029+0000 7f754e0fc000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T06:57:05.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:57:05 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:57:05.085+0000 7f754e0fc000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: from='client.14220 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/957797956' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "35204e1a-6579-424a-9923-9986832c655c"}]: dispatch 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/957797956' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "35204e1a-6579-424a-9923-9986832c655c"}]': finished 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: osdmap e5: 1 total, 0 up, 1 in 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3344754119' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:05.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:04 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3344754119' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:06.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:05 vm05 ceph-mon[48591]: Standby manager daemon x started 2026-03-10T06:57:06.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:05 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T06:57:06.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:05 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T06:57:06.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:05 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T06:57:06.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:05 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[50158]: Standby manager daemon x started 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[54377]: Standby manager daemon x started 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T06:57:06.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T06:57:06.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:05 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.105:0/3217909562' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T06:57:07.072 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:06 vm02 ceph-mon[50158]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:07.072 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:06 vm02 ceph-mon[50158]: mgrmap e14: y(active, since 28s), standbys: x 2026-03-10T06:57:07.072 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:06 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T06:57:07.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:06 vm02 ceph-mon[54377]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:07.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:06 vm02 ceph-mon[54377]: mgrmap e14: y(active, since 28s), standbys: x 2026-03-10T06:57:07.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:06 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T06:57:07.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:06 vm05 ceph-mon[48591]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:07.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:06 vm05 ceph-mon[48591]: mgrmap e14: y(active, since 28s), standbys: x 2026-03-10T06:57:07.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:06 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T06:57:08.199 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:07 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T06:57:08.199 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:07 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:08.199 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:07 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T06:57:08.199 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:07 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:07 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T06:57:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:07 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:09.126 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[50158]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[50158]: Deploying daemon osd.0 on vm02 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 
06:57:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[54377]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[54377]: Deploying daemon osd.0 on vm02 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:09.127 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:08 vm05 ceph-mon[48591]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:08 vm05 ceph-mon[48591]: Deploying daemon osd.0 on vm02 2026-03-10T06:57:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:09.502 INFO:teuthology.orchestra.run.vm02.stdout:Created osd(s) 0 on host 'vm02' 2026-03-10T06:57:09.557 DEBUG:teuthology.orchestra.run.vm02:osd.0> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.0.service 2026-03-10T06:57:09.558 INFO:tasks.cephadm:Deploying osd.1 on vm02 with /dev/vdd... 
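The orchestrator acknowledges the first OSD with "Created osd(s) 0 on host 'vm02'" and the task immediately moves on to /dev/vdd. When following a run like this by hand, a quick spot check from a cephadm shell on either node (not part of the task itself) confirms the daemon really deployed and is coming up:

  ceph orch ps | grep '^osd\.0'   # daemon listed (and eventually running) on vm02
  ceph osd tree                   # osd.0 placed under host vm02 with device class hdd
  ceph osd stat                   # up/in counts should track the osdmap lines in this log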
2026-03-10T06:57:09.558 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vdd 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[50158]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:10.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:10.645 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:10 vm02 ceph-mon[54377]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T06:57:10.645 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 06:57:10 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[57409]: 2026-03-10T06:57:10.383+0000 7f3eb8d323c0 -1 osd.0 0 log_to_monitors true 2026-03-10T06:57:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:10.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:10.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:10 vm05 ceph-mon[48591]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T06:57:10.939 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:57:10.953 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm02:/dev/vdd 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: Detected new or changed devices on vm02 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='osd.0 
[v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:11.671 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: Detected new or changed devices on vm02 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:11.672 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: Detected new or changed devices on vm02 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: osdmap e6: 1 total, 0 up, 1 in 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:12.335 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 06:57:12 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[57409]: 2026-03-10T06:57:12.073+0000 7f3eb0f38700 -1 osd.0 0 waiting for initial osdmap 2026-03-10T06:57:12.335 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 06:57:12 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[57409]: 2026-03-10T06:57:12.080+0000 7f3eab8cf700 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='client.14241 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3175185155' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]: dispatch 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]: dispatch 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]': finished 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:12 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2686325788' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='client.14241 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3175185155' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]': finished 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2686325788' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='client.14241 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3175185155' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "eb30f5d3-2080-416b-b174-8350b4a9f19d"}]': finished 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: osdmap e7: 2 total, 0 up, 2 in 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:13.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:12 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/2686325788' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[54377]: osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044] boot 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[54377]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[50158]: osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044] boot 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[50158]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:14 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:57:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:14 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:57:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:14 vm05 ceph-mon[48591]: osd.0 [v2:192.168.123.102:6802/1562453044,v1:192.168.123.102:6803/1562453044] boot 2026-03-10T06:57:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:14 vm05 ceph-mon[48591]: osdmap e8: 2 total, 1 up, 2 in 2026-03-10T06:57:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:57:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:15.272 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:15 vm02 ceph-mon[50158]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:15.272 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:15 vm02 ceph-mon[54377]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T06:57:15.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:15 vm05 ceph-mon[48591]: pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 
2026-03-10T06:57:16.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:16 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T06:57:16.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:16 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:16.541 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:16 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T06:57:16.541 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:16 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:16.541 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:16 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T06:57:16.541 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:16 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[50158]: Deploying daemon osd.1 on vm02 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[50158]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[54377]: Deploying daemon osd.1 on vm02 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[54377]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:17.178 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:17 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", 
"entity": "client.admin"}]: dispatch 2026-03-10T06:57:17.500 INFO:teuthology.orchestra.run.vm02.stdout:Created osd(s) 1 on host 'vm02' 2026-03-10T06:57:17.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:17 vm05 ceph-mon[48591]: Deploying daemon osd.1 on vm02 2026-03-10T06:57:17.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:17 vm05 ceph-mon[48591]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:17.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:17 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:17.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:17 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:17.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:17 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:17.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:17 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:17.570 DEBUG:teuthology.orchestra.run.vm02:osd.1> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.1.service 2026-03-10T06:57:17.572 INFO:tasks.cephadm:Deploying osd.2 on vm02 with /dev/vdc... 2026-03-10T06:57:17.572 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vdc 2026-03-10T06:57:18.087 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 06:57:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[60137]: 2026-03-10T06:57:17.849+0000 7efc49e353c0 -1 osd.1 0 log_to_monitors true 2026-03-10T06:57:18.254 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:57:18.267 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm02:/dev/vdc 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[50158]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T06:57:18.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:18 vm02 ceph-mon[54377]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T06:57:18.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T06:57:18.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:18 vm05 ceph-mon[48591]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T06:57:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: Detected new or changed devices on vm02 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 
2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: Detected new or changed devices on vm02 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:19.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:19 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:19.836 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 06:57:19 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[60137]: 2026-03-10T06:57:19.512+0000 7efc4203b700 -1 osd.1 0 waiting for initial osdmap 2026-03-10T06:57:19.836 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 06:57:19 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[60137]: 2026-03-10T06:57:19.520+0000 7efc3d1d3700 -1 osd.1 10 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 
2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: osdmap e9: 2 total, 1 up, 2 in 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='client.24139 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: Detected new or changed devices on vm02 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:19 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 
1}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/871398352' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890] boot 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]': finished 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: osdmap e11: 3 total, 2 up, 3 in 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1208263791' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/871398352' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890] boot 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]': finished 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: osdmap e11: 3 total, 2 up, 3 in 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:20 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1208263791' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: pgmap v20: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: osdmap e10: 2 total, 1 up, 2 in 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/871398352' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: osd.1 [v2:192.168.123.102:6810/2672109890,v1:192.168.123.102:6811/2672109890] boot 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3"}]': finished 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: osdmap e11: 3 total, 2 up, 3 in 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:20 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/1208263791' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:21 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:57:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:21 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:57:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:21 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:57:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:21 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:57:22.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:21 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:57:22.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:21 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:57:22.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:22 vm02 ceph-mon[50158]: pgmap v23: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:22.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:22 vm02 ceph-mon[54377]: pgmap v23: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:23.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:22 vm05 ceph-mon[48591]: pgmap v23: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:23.744 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:23 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T06:57:23.745 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:23 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:23.745 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:23 vm02 ceph-mon[50158]: Deploying daemon osd.2 on vm02 2026-03-10T06:57:23.746 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:23 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T06:57:23.746 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:23 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:23.746 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:23 vm02 ceph-mon[54377]: Deploying daemon osd.2 on vm02 2026-03-10T06:57:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:23 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T06:57:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:23 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:23 vm05 ceph-mon[48591]: Deploying daemon osd.2 on vm02 2026-03-10T06:57:25.171 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[50158]: pgmap v24: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:25.171 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:25.171 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' 
entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:25.171 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:25.171 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:25.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[54377]: pgmap v24: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:25.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:25.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:25.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:25.444 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:25 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:25.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:25 vm05 ceph-mon[48591]: pgmap v24: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:25.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:25 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:25.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:25 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:25.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:25 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:25.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:25 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:25.515 INFO:teuthology.orchestra.run.vm02.stdout:Created osd(s) 2 on host 'vm02' 2026-03-10T06:57:25.590 DEBUG:teuthology.orchestra.run.vm02:osd.2> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.2.service 2026-03-10T06:57:25.591 INFO:tasks.cephadm:Deploying osd.3 on vm02 with /dev/vdb... 
2026-03-10T06:57:25.591 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vdb 2026-03-10T06:57:26.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 06:57:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[62882]: 2026-03-10T06:57:26.226+0000 7fd6068ef3c0 -1 osd.2 0 log_to_monitors true 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[50158]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:26.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:26.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:26.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:26 vm02 ceph-mon[54377]: from='osd.2 
[v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:26.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:26.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:26 vm05 ceph-mon[48591]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T06:57:27.229 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:57:27.246 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm02:/dev/vdb 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: Detected new or changed devices on vm02 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' 
entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:27.761 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 06:57:27 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[62882]: 2026-03-10T06:57:27.525+0000 7fd5fd2f2700 -1 osd.2 0 waiting for initial osdmap 2026-03-10T06:57:27.761 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 06:57:27 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[62882]: 2026-03-10T06:57:27.553+0000 7fd5f7488700 -1 osd.2 13 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: Detected new or changed devices on vm02 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:27.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:27 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: osdmap e12: 3 total, 2 up, 3 in 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: Detected new or changed devices on 
vm02 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:28.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:27 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:28.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:28.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:28.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:28 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: osdmap e13: 3 total, 2 up, 3 in 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm02:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:28 
vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1485229076' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3a40e513-6ad9-43f1-ba74-0b37d785fad9"}]: dispatch 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050] boot 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: osdmap e14: 3 total, 3 up, 3 in 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1485229076' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3a40e513-6ad9-43f1-ba74-0b37d785fad9"}]': finished 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: osdmap e15: 4 total, 3 up, 4 in 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2560577293' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1485229076' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3a40e513-6ad9-43f1-ba74-0b37d785fad9"}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050] boot 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: osdmap e14: 3 total, 3 up, 3 in 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/1485229076' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3a40e513-6ad9-43f1-ba74-0b37d785fad9"}]': finished 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: osdmap e15: 4 total, 3 up, 4 in 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2560577293' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1485229076' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3a40e513-6ad9-43f1-ba74-0b37d785fad9"}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: osd.2 [v2:192.168.123.102:6818/3047564050,v1:192.168.123.102:6819/3047564050] boot 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: osdmap e14: 3 total, 3 up, 3 in 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1485229076' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3a40e513-6ad9-43f1-ba74-0b37d785fad9"}]': finished 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: osdmap e15: 4 total, 3 up, 4 in 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:30.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:29 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/2560577293' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:30 vm05 ceph-mon[48591]: pgmap v31: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:30 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T06:57:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:30 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T06:57:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:30 vm05 ceph-mon[48591]: osdmap e16: 4 total, 3 up, 4 in 2026-03-10T06:57:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:30 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:30 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T06:57:31.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[54377]: pgmap v31: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[54377]: osdmap e16: 4 total, 3 up, 4 in 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[50158]: pgmap v31: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": 
"json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[50158]: osdmap e16: 4 total, 3 up, 4 in 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:30 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-10T06:57:31.835 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 06:57:31 vm02 sudo[65225]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-10T06:57:31.835 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 06:57:31 vm02 sudo[65225]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T06:57:31.835 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 06:57:31 vm02 sudo[65225]: pam_unix(sudo:session): session closed for user root 2026-03-10T06:57:32.224 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 06:57:31 vm02 sudo[65262]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-10T06:57:32.225 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 06:57:31 vm02 sudo[65262]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T06:57:32.225 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 06:57:31 vm02 sudo[65262]: pam_unix(sudo:session): session closed for user root 2026-03-10T06:57:32.225 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65283]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-10T06:57:32.225 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65283]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T06:57:32.225 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65283]: pam_unix(sudo:session): session closed for user root 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65288]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65288]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65288]: pam_unix(sudo:session): session closed for user root 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[50158]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:32.499 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[50158]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:32.500 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65353]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-10T06:57:32.500 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65353]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T06:57:32.500 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 sudo[65353]: pam_unix(sudo:session): session closed for user root 2026-03-10T06:57:32.500 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T06:57:32.500 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[54377]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T06:57:32.500 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:32.500 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:32 vm02 ceph-mon[54377]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-10T06:57:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 ceph-mon[48591]: osdmap e17: 4 total, 3 up, 4 in 2026-03-10T06:57:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 ceph-mon[48591]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 sudo[50270]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-10T06:57:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 sudo[50270]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-10T06:57:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:32 vm05 sudo[50270]: pam_unix(sudo:session): session closed for user root 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: pgmap v34: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' 
entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: Deploying daemon osd.3 on vm02 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:33.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:33 vm05 ceph-mon[48591]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: pgmap v34: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: 
dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: Deploying daemon osd.3 on vm02 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[50158]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: pgmap v34: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 
2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: Deploying daemon osd.3 on vm02 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:57:33.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:57:33.587 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:57:33.587 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: osdmap e18: 4 total, 3 up, 4 in 2026-03-10T06:57:33.587 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 
3}]: dispatch 2026-03-10T06:57:33.587 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:33 vm02 ceph-mon[54377]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-10T06:57:34.848 INFO:teuthology.orchestra.run.vm02.stdout:Created osd(s) 3 on host 'vm02' 2026-03-10T06:57:34.903 DEBUG:teuthology.orchestra.run.vm02:osd.3> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service 2026-03-10T06:57:34.904 INFO:tasks.cephadm:Deploying osd.4 on vm05 with /dev/vde... 2026-03-10T06:57:34.904 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vde 2026-03-10T06:57:35.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: pgmap v36: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: mgrmap e15: y(active, since 56s), standbys: x 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: pgmap v36: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 
ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: mgrmap e15: y(active, since 56s), standbys: x 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:35.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:34 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: pgmap v36: 1 pgs: 1 creating+peering; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: mgrmap e15: y(active, since 56s), standbys: x 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 
2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:35.167 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:34 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:35.519 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:57:35.532 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm05:/dev/vde 2026-03-10T06:57:36.335 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 06:57:36 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[65679]: 2026-03-10T06:57:36.273+0000 7f53176c83c0 -1 osd.3 0 log_to_monitors true 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: pgmap v37: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='client.24158 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: Detected new or changed devices on vm02 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='client.? 
192.168.123.105:0/1333943086' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]': finished 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: osdmap e19: 5 total, 3 up, 5 in 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:37.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:36 vm05 ceph-mon[48591]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: pgmap v37: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='client.24158 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: Detected new or changed devices on vm02 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 
06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/1333943086' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]': finished 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: osdmap e19: 5 total, 3 up, 5 in 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:37.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[50158]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: pgmap v37: 1 pgs: 1 creating+peering; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='client.24158 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: Detected new or changed devices on 
vm02 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='client.? 192.168.123.105:0/1333943086' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fa4107e7-dcd8-4c34-a994-7706880ac944"}]': finished 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: osdmap e19: 5 total, 3 up, 5 in 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:37.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:36 vm02 ceph-mon[54377]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T06:57:37.929 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='client.? 
192.168.123.105:0/2387667741' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:37.929 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:57:37.929 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:57:37.929 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:37.929 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: osdmap e20: 5 total, 3 up, 5 in 2026-03-10T06:57:37.929 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/2387667741' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: osdmap e20: 5 total, 3 up, 5 in 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:37.930 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 06:57:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[65679]: 2026-03-10T06:57:37.818+0000 7f530e0cb700 -1 osd.3 0 waiting for initial osdmap 2026-03-10T06:57:37.930 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 06:57:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[65679]: 2026-03-10T06:57:37.827+0000 7f530aa66700 -1 osd.3 20 
set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:57:37.933 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:37.933 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:37 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/2387667741' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: osdmap e20: 5 total, 3 up, 5 in 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:38.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:37 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:39.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:38 vm05 ceph-mon[48591]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:39.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:38 vm05 ceph-mon[48591]: osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773] boot 2026-03-10T06:57:39.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:38 vm05 ceph-mon[48591]: osdmap e21: 5 total, 4 up, 5 in 2026-03-10T06:57:39.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:38 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:39.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:38 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:39.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[54377]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 
06:57:38 vm02 ceph-mon[54377]: osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773] boot 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[54377]: osdmap e21: 5 total, 4 up, 5 in 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[50158]: pgmap v39: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[50158]: osd.3 [v2:192.168.123.102:6826/3907781773,v1:192.168.123.102:6827/3907781773] boot 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[50158]: osdmap e21: 5 total, 4 up, 5 in 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:57:39.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:38 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:40.464 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:40 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:57:40.464 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:40 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:57:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:40 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:57:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:40 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:57:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:40 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:57:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:40 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:57:41.288 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:41 vm05 ceph-mon[48591]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:41.288 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:41 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T06:57:41.289 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:41 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:41.289 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:41 vm05 ceph-mon[48591]: Deploying daemon osd.4 on vm05 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[54377]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T06:57:41.585 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[54377]: Deploying daemon osd.4 on vm05 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[50158]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:41.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:41 vm02 ceph-mon[50158]: Deploying daemon osd.4 on vm05 2026-03-10T06:57:42.695 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 4 on host 'vm05' 2026-03-10T06:57:42.761 DEBUG:teuthology.orchestra.run.vm05:osd.4> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.4.service 2026-03-10T06:57:42.762 INFO:tasks.cephadm:Deploying osd.5 on vm05 with /dev/vdd... 2026-03-10T06:57:42.762 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vdd 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:42.960 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:42 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:43.085 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:42 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:43.221 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 06:57:43 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[52009]: 2026-03-10T06:57:43.163+0000 7f3da211b3c0 -1 osd.4 0 log_to_monitors true 2026-03-10T06:57:43.510 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:57:43.524 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm05:/dev/vdd 2026-03-10T06:57:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:43 vm02 ceph-mon[54377]: from='osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T06:57:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:43 vm02 ceph-mon[54377]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T06:57:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:43 vm02 ceph-mon[50158]: from='osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T06:57:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:43 vm02 ceph-mon[50158]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T06:57:44.087 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:43 vm05 ceph-mon[48591]: from='osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T06:57:44.087 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:43 vm05 ceph-mon[48591]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T06:57:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush 
set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: osdmap e22: 5 total, 4 up, 5 in 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='client.24188 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:44 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:45.004 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 06:57:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[52009]: 2026-03-10T06:57:44.850+0000 7f3d98b1e700 -1 osd.4 0 waiting for initial osdmap 2026-03-10T06:57:45.004 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 06:57:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[52009]: 2026-03-10T06:57:44.856+0000 7f3d954b9700 -1 osd.4 23 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:57:45.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: osdmap e22: 5 total, 4 up, 5 in 
2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='client.24188 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: osdmap e22: 5 total, 4 up, 5 in 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", 
"id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='client.24188 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:45.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:44 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: Detected new or changed devices on vm05 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: Adjusting osd_memory_target on vm05 to 257.0M 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: Unable to set osd_memory_target on vm05 to 269527859: error parsing value: Value '269527859' is below minimum 939524096 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: osdmap e23: 5 total, 4 up, 5 in 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/3862832388' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]: dispatch 2026-03-10T06:57:46.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]: dispatch 2026-03-10T06:57:46.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907] boot 2026-03-10T06:57:46.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]': finished 2026-03-10T06:57:46.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: osdmap e24: 6 total, 5 up, 6 in 2026-03-10T06:57:46.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:46.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:45 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/2573927113' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: Detected new or changed devices on vm05 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: Adjusting osd_memory_target on vm05 to 257.0M 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: Unable to set osd_memory_target on vm05 to 269527859: error parsing value: Value '269527859' is below minimum 939524096 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: osdmap e23: 5 total, 4 up, 5 in 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='client.? 192.168.123.105:0/3862832388' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]: dispatch 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]: dispatch 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907] boot 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]': finished 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: osdmap e24: 6 total, 5 up, 6 in 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[54377]: from='client.? 192.168.123.105:0/2573927113' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: Detected new or changed devices on vm05 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: Adjusting osd_memory_target on vm05 to 257.0M 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: Unable to set osd_memory_target on vm05 to 269527859: error parsing value: Value '269527859' is below minimum 939524096 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: osdmap e23: 5 total, 4 up, 5 in 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/3862832388' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: osd.4 [v2:192.168.123.105:6800/3902980907,v1:192.168.123.105:6801/3902980907] boot 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "4cf77df5-c5c7-4d20-b47b-ed4598f3fb56"}]': finished 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: osdmap e24: 6 total, 5 up, 6 in 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:46.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:45 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/2573927113' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:47.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:46 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:57:47.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:46 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:57:47.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:46 vm05 ceph-mon[48591]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:47.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:46 vm05 ceph-mon[48591]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T06:57:47.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:46 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:47.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:57:47.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:57:47.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[54377]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:47.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[54377]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T06:57:47.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:47.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:57:47.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:57:47.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[50158]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-10T06:57:47.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[50158]: osdmap e25: 6 total, 5 up, 6 in 2026-03-10T06:57:47.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:46 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:48.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:47 vm05 ceph-mon[48591]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T06:57:48.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:47 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": 
"osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:48.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:47 vm02 ceph-mon[54377]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T06:57:48.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:47 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:48.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:47 vm02 ceph-mon[50158]: osdmap e26: 6 total, 5 up, 6 in 2026-03-10T06:57:48.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:47 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:49.144 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:48 vm05 ceph-mon[48591]: pgmap v51: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T06:57:49.144 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:48 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T06:57:49.144 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:48 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:49.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:48 vm02 ceph-mon[54377]: pgmap v51: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T06:57:49.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:48 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T06:57:49.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:48 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:49.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:48 vm02 ceph-mon[50158]: pgmap v51: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T06:57:49.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:48 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T06:57:49.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:48 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:50.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:50 vm05 ceph-mon[48591]: Deploying daemon osd.5 on vm05 2026-03-10T06:57:50.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:50 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:50.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:50 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:50.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:50 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:50.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:50 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", 
"entity": "client.admin"}]: dispatch 2026-03-10T06:57:50.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[54377]: Deploying daemon osd.5 on vm05 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[50158]: Deploying daemon osd.5 on vm05 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:50 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:50.749 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 5 on host 'vm05' 2026-03-10T06:57:50.812 DEBUG:teuthology.orchestra.run.vm05:osd.5> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.5.service 2026-03-10T06:57:50.814 INFO:tasks.cephadm:Deploying osd.6 on vm05 with /dev/vdc... 
2026-03-10T06:57:50.814 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vdc 2026-03-10T06:57:51.505 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:51 vm05 ceph-mon[48591]: pgmap v52: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T06:57:51.505 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:51 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:51.505 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:51 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:51.566 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:57:51.583 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm05:/dev/vdc 2026-03-10T06:57:51.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:51 vm02 ceph-mon[54377]: pgmap v52: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T06:57:51.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:51 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:51.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:51 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:51.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:51 vm02 ceph-mon[50158]: pgmap v52: 1 pgs: 1 remapped+peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-10T06:57:51.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:51 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:51.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:51 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:51.783 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 06:57:51 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[54725]: 2026-03-10T06:57:51.506+0000 7fec3126f3c0 -1 osd.5 0 log_to_monitors true 2026-03-10T06:57:52.414 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:52 vm05 ceph-mon[48591]: from='osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T06:57:52.414 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:52 vm05 ceph-mon[48591]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T06:57:52.414 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:52 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:52.414 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:52 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:52.414 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:52 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[54377]: from='osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[54377]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[50158]: from='osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[50158]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:57:52.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:52 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:53.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='client.24215 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 
06:57:53 vm05 ceph-mon[48591]: from='osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/2695841438' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]': finished 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: osdmap e28: 7 total, 5 up, 7 in 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:53 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.504 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 06:57:53 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[54725]: 2026-03-10T06:57:53.017+0000 7fec29475700 -1 osd.5 0 waiting for initial osdmap 2026-03-10T06:57:53.504 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 06:57:53 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[54725]: 2026-03-10T06:57:53.025+0000 7fec23e0c700 -1 osd.5 28 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='client.24215 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:53.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='client.? 192.168.123.105:0/2695841438' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]': finished 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: osdmap e28: 7 total, 5 up, 7 in 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='client.24215 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: osdmap e27: 6 total, 5 up, 6 in 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 
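The "Unable to set osd_memory_target" warnings above come down to unit arithmetic: cephadm's autotuner proposes a per-OSD memory target derived from the small VM's memory, and the monitor rejects it because it falls below the configured minimum for osd_memory_target. A quick check of the figures reported in the log, assuming 1 MiB = 2**20 bytes:

    # Sketch: reproduce the numbers in the "Adjusting/Unable to set
    # osd_memory_target" messages above purely from unit conversion.
    MIB = 2 ** 20

    proposed = 269_527_859   # value the autotuner computed for vm05 (see the warning above)
    minimum = 939_524_096    # lower bound the monitor enforces for osd_memory_target

    print(round(proposed / MIB, 1))   # 257.0 -> the "257.0M" in the log
    print(minimum // MIB)             # 896   -> why the proposed value is rejected
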
2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/2695841438' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d5098a9b-8d57-4249-b546-8ac52d23059a"}]': finished 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: osdmap e28: 7 total, 5 up, 7 in 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:53.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:53 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: Detected new or changed devices on vm05 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: Adjusting osd_memory_target on vm05 to 128.5M 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: Unable to set osd_memory_target on vm05 to 134763929: error parsing value: Value '134763929' is below minimum 939524096 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: from='client.? 
192.168.123.105:0/2744201461' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217] boot 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: osdmap e29: 7 total, 6 up, 7 in 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: osdmap e30: 7 total, 6 up, 7 in 2026-03-10T06:57:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:54 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: Detected new or changed devices on vm05 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: Adjusting osd_memory_target on vm05 to 128.5M 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: Unable to set osd_memory_target on vm05 to 134763929: error parsing value: Value '134763929' is below minimum 939524096 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: from='client.? 
192.168.123.105:0/2744201461' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217] boot 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: osdmap e29: 7 total, 6 up, 7 in 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:54.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: osdmap e30: 7 total, 6 up, 7 in 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: Detected new or changed devices on vm05 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: Adjusting osd_memory_target on vm05 to 128.5M 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: Unable to set osd_memory_target on vm05 to 134763929: error parsing value: Value '134763929' is below minimum 939524096 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: from='client.? 
192.168.123.105:0/2744201461' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: osd.5 [v2:192.168.123.105:6808/3731608217,v1:192.168.123.105:6809/3731608217] boot 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: osdmap e29: 7 total, 6 up, 7 in 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: osdmap e30: 7 total, 6 up, 7 in 2026-03-10T06:57:54.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:54 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:55.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:55 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:57:55.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:55 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:57:55.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:55 vm05 ceph-mon[48591]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T06:57:55.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:55 vm05 ceph-mon[48591]: osdmap e31: 7 total, 6 up, 7 in 2026-03-10T06:57:55.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:55 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[54377]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[54377]: osdmap e31: 7 total, 6 up, 7 in 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[50158]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[50158]: osdmap e31: 7 total, 6 up, 7 in 2026-03-10T06:57:55.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:55 vm02 ceph-mon[50158]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:57:57.356 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:57 vm05 ceph-mon[48591]: pgmap v60: 1 pgs: 1 remapped+peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:57:57.356 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:57 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T06:57:57.356 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:57 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:57.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:57 vm02 ceph-mon[54377]: pgmap v60: 1 pgs: 1 remapped+peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:57:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:57 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T06:57:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:57 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:57 vm02 ceph-mon[50158]: pgmap v60: 1 pgs: 1 remapped+peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:57:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:57 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T06:57:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:57 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:58.423 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:58 vm05 ceph-mon[48591]: Deploying daemon osd.6 on vm05 2026-03-10T06:57:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:58 vm02 ceph-mon[54377]: Deploying daemon osd.6 on vm05 2026-03-10T06:57:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:58 vm02 ceph-mon[50158]: Deploying daemon osd.6 on vm05 2026-03-10T06:57:59.320 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 6 on host 'vm05' 2026-03-10T06:57:59.371 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: pgmap v61: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:57:59.371 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.371 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:59.371 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:59.371 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:59.371 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.371 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:57:59 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.385 DEBUG:teuthology.orchestra.run.vm05:osd.6> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6.service 2026-03-10T06:57:59.387 INFO:tasks.cephadm:Deploying osd.7 on vm05 with /dev/vdb... 2026-03-10T06:57:59.387 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- lvm zap /dev/vdb 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: pgmap v61: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: pgmap v61: 1 pgs: 1 remapped+peering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:57:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:57:59 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:00.274 
INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 06:58:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[57533]: 2026-03-10T06:58:00.017+0000 7f57c2a9b3c0 -1 osd.6 0 log_to_monitors true 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: from='osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T06:58:00.544 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:00 vm05 ceph-mon[48591]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T06:58:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: from='osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[54377]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' 
entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: from='osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T06:58:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:00 vm02 ceph-mon[50158]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T06:58:01.028 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-10T06:58:01.045 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch daemon add osd vm05:/dev/vdb 2026-03-10T06:58:01.551 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 06:58:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[57533]: 2026-03-10T06:58:01.350+0000 7f57b949e700 -1 osd.6 0 waiting for initial osdmap 2026-03-10T06:58:01.551 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 06:58:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[57533]: 2026-03-10T06:58:01.362+0000 7f57b3e35700 -1 osd.6 33 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: osdmap e32: 7 total, 6 up, 7 in 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: Detected new or changed devices on vm05 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:01.551 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: Adjusting osd_memory_target on vm05 to 87736k 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: Unable to set osd_memory_target on vm05 to 89842619: error parsing value: Value '89842619' is below minimum 939524096 2026-03-10T06:58:01.551 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:01 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: osdmap e32: 7 total, 6 up, 7 in 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: Detected new or changed devices on vm05 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: Adjusting osd_memory_target 
on vm05 to 87736k 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: Unable to set osd_memory_target on vm05 to 89842619: error parsing value: Value '89842619' is below minimum 939524096 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: osdmap e32: 7 total, 6 up, 7 in 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: Detected new or changed devices on vm05 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: Adjusting osd_memory_target on vm05 to 87736k 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: Unable to set osd_memory_target on vm05 to 89842619: error parsing value: Value '89842619' is below minimum 939524096 2026-03-10T06:58:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:01 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T06:58:02.754 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 63 KiB/s, 0 objects/s recovering 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='client.24242 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/1459717942' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abb08ecf-81bc-4ddf-8ab4-1c14d5086c97"}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160] boot 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='client.? 
192.168.123.105:0/1459717942' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "abb08ecf-81bc-4ddf-8ab4-1c14d5086c97"}]': finished 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: osdmap e34: 8 total, 7 up, 8 in 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:02 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 63 KiB/s, 0 objects/s recovering 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='client.24242 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='client.? 192.168.123.105:0/1459717942' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abb08ecf-81bc-4ddf-8ab4-1c14d5086c97"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160] boot 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='client.? 
192.168.123.105:0/1459717942' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "abb08ecf-81bc-4ddf-8ab4-1c14d5086c97"}]': finished 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: osdmap e34: 8 total, 7 up, 8 in 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: osdmap e33: 7 total, 6 up, 7 in 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 63 KiB/s, 0 objects/s recovering 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='client.24242 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/1459717942' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "abb08ecf-81bc-4ddf-8ab4-1c14d5086c97"}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: osd.6 [v2:192.168.123.105:6816/576011160,v1:192.168.123.105:6817/576011160] boot 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='client.? 
192.168.123.105:0/1459717942' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "abb08ecf-81bc-4ddf-8ab4-1c14d5086c97"}]': finished 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: osdmap e34: 8 total, 7 up, 8 in 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:02 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:03 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:58:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:03 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:58:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:03 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/3765955094' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:58:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:03 vm05 ceph-mon[48591]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T06:58:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:03 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[54377]: from='client.? 192.168.123.105:0/3765955094' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[54377]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[50158]: from='client.? 
192.168.123.105:0/3765955094' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[50158]: osdmap e35: 8 total, 7 up, 8 in 2026-03-10T06:58:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:03 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:05.390 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:05 vm05 ceph-mon[48591]: pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 39 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:05.390 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:05 vm05 ceph-mon[48591]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T06:58:05.390 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:05 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:05.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:05 vm02 ceph-mon[54377]: pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 39 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:05.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:05 vm02 ceph-mon[54377]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T06:58:05.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:05 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:05.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:05 vm02 ceph-mon[50158]: pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 39 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:05.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:05 vm02 ceph-mon[50158]: osdmap e36: 8 total, 7 up, 8 in 2026-03-10T06:58:05.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:05 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:06.422 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:06 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T06:58:06.422 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:06 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:06.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:06 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T06:58:06.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:06 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:06.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:06 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T06:58:06.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:06 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:07.197 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:07 vm05 ceph-mon[48591]: pgmap v70: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB 
avail 2026-03-10T06:58:07.197 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:07 vm05 ceph-mon[48591]: Deploying daemon osd.7 on vm05 2026-03-10T06:58:07.197 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:07 vm05 ceph-mon[48591]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T06:58:07.197 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:07 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:07.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[54377]: pgmap v70: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:07.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[54377]: Deploying daemon osd.7 on vm05 2026-03-10T06:58:07.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[54377]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T06:58:07.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:07.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[50158]: pgmap v70: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:07.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[50158]: Deploying daemon osd.7 on vm05 2026-03-10T06:58:07.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[50158]: osdmap e37: 8 total, 7 up, 8 in 2026-03-10T06:58:07.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:07 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:08.204 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:08.204 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:08.204 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:08.204 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:08 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:08.384 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 7 on host 'vm05' 2026-03-10T06:58:08.457 DEBUG:teuthology.orchestra.run.vm05:osd.7> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service 2026-03-10T06:58:08.458 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
2026-03-10T06:58:08.458 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd stat -f json 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:08.483 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:08 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.003 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 06:58:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[60290]: 2026-03-10T06:58:08.893+0000 7f074b2443c0 -1 osd.7 0 log_to_monitors true 2026-03-10T06:58:09.076 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:09.142 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":37,"num_osds":8,"num_up_osds":7,"osd_up_since":1773125882,"num_in_osds":8,"osd_in_since":1773125882,"num_remapped_pgs":0} 2026-03-10T06:58:09.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:09.335 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1377776950' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T06:58:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:09 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/1377776950' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T06:58:09.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:09 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/1377776950' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:10.143 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd stat -f json 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: Detected new or changed devices on vm05 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: Adjusting osd_memory_target on vm05 to 65802k 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: Unable to set osd_memory_target on vm05 to 67381964: error parsing value: Value '67381964' is below minimum 939524096 2026-03-10T06:58:10.430 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:10.431 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: Detected new or changed devices on vm05 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: Adjusting osd_memory_target on vm05 to 65802k 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: Unable to set osd_memory_target on vm05 to 67381964: error parsing value: Value '67381964' is below minimum 939524096 2026-03-10T06:58:10.431 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:10 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:10.639 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:10.708 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":39,"num_osds":8,"num_up_osds":7,"osd_up_since":1773125882,"num_in_osds":8,"osd_in_since":1773125882,"num_remapped_pgs":0} 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush 
set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: osdmap e38: 8 total, 7 up, 8 in 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: Detected new or changed devices on vm05 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: Adjusting osd_memory_target on vm05 to 65802k 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: Unable to set osd_memory_target on vm05 to 67381964: error parsing value: Value '67381964' is below minimum 939524096 2026-03-10T06:58:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:10 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:10.753 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 06:58:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[60290]: 2026-03-10T06:58:10.404+0000 7f0741c47700 -1 osd.7 0 waiting for initial osdmap 2026-03-10T06:58:10.753 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 06:58:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[60290]: 2026-03-10T06:58:10.412+0000 7f073e5e2700 -1 osd.7 39 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T06:58:11.709 
DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd stat -f json 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/4220653900' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270] boot 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.732 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/4220653900' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:11.733 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270] boot 2026-03-10T06:58:11.733 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T06:58:11.733 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:11 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: osdmap e39: 8 total, 7 up, 8 in 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/4220653900' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: osd.7 [v2:192.168.123.105:6824/3692745270,v1:192.168.123.105:6825/3692745270] boot 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: osdmap e40: 8 total, 8 up, 8 in 2026-03-10T06:58:11.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:11 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:12.172 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:12.220 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":40,"num_osds":8,"num_up_osds":8,"osd_up_since":1773125891,"num_in_osds":8,"osd_in_since":1773125882,"num_remapped_pgs":1} 2026-03-10T06:58:12.221 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd dump --format=json 2026-03-10T06:58:12.435 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[50158]: purged_snaps scrub starts 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[50158]: purged_snaps scrub ok 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[50158]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/2091352928' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[50158]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[54377]: purged_snaps scrub starts 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[54377]: purged_snaps scrub ok 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[54377]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2091352928' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:12.562 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:12 vm02 ceph-mon[54377]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T06:58:12.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:12 vm05 ceph-mon[48591]: purged_snaps scrub starts 2026-03-10T06:58:12.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:12 vm05 ceph-mon[48591]: purged_snaps scrub ok 2026-03-10T06:58:12.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:12 vm05 ceph-mon[48591]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-10T06:58:12.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:12 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2091352928' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-10T06:58:12.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:12 vm05 ceph-mon[48591]: osdmap e41: 8 total, 8 up, 8 in 2026-03-10T06:58:12.778 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:12.778 
INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":41,"fsid":"28bd35e6-1c4e-11f1-9057-21b3549603fc","created":"2026-03-10T06:56:12.122649+0000","modified":"2026-03-10T06:58:12.389843+0000","last_up_change":"2026-03-10T06:58:11.385881+0000","last_in_change":"2026-03-10T06:58:02.296504+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T06:57:29.589386+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"35204e1a-6579-424a-9923-9986832c655c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6802","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6803","nonce":1562453044}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6804","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6805","nonce":1562453044}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6808","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6809","nonce":1562453044}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6806","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6807","nonce":1562453044}]},"public_addr":"192.168.123.102:6803/1562453044","cluster_addr":"192.168.123.102:6805/1562453044","heartbeat_back_addr":"192.168.123.102:6809/1562453044","heartbeat_front_addr":"192.168.123.102:6807/1562453044","state":["exists","up"]},{"osd":1,"uuid":"eb30f5d3-2080-416b-b174-8350b4a9f19d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":25,"down_at":0,"lost_at
":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6810","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6811","nonce":2672109890}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6812","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6813","nonce":2672109890}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6816","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6817","nonce":2672109890}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6814","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6815","nonce":2672109890}]},"public_addr":"192.168.123.102:6811/2672109890","cluster_addr":"192.168.123.102:6813/2672109890","heartbeat_back_addr":"192.168.123.102:6817/2672109890","heartbeat_front_addr":"192.168.123.102:6815/2672109890","state":["exists","up"]},{"osd":2,"uuid":"dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":14,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6818","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6819","nonce":3047564050}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6820","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6821","nonce":3047564050}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6824","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6825","nonce":3047564050}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6822","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6823","nonce":3047564050}]},"public_addr":"192.168.123.102:6819/3047564050","cluster_addr":"192.168.123.102:6821/3047564050","heartbeat_back_addr":"192.168.123.102:6825/3047564050","heartbeat_front_addr":"192.168.123.102:6823/3047564050","state":["exists","up"]},{"osd":3,"uuid":"3a40e513-6ad9-43f1-ba74-0b37d785fad9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6826","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6827","nonce":3907781773}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6828","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6829","nonce":3907781773}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6832","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6833","nonce":3907781773}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6830","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6831","nonce":3907781773}]},"public_addr":"192.168.123.102:6827/3907781773","cluster_addr":"192.168.123.102:6829/3907781773","heartbeat_back_addr":"192.168.123.102:6833/3907781773","heartbeat_front_addr":"192.168.123.102:6831/3907781773","state":["exists","up"]},{"osd":4,"uuid":"fa4107e7-dcd8-4c34-a994-7706880ac944","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6801","nonce":3902980907}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6803","nonce":3902980907}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","ad
dr":"192.168.123.105:6806","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6807","nonce":3902980907}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6805","nonce":3902980907}]},"public_addr":"192.168.123.105:6801/3902980907","cluster_addr":"192.168.123.105:6803/3902980907","heartbeat_back_addr":"192.168.123.105:6807/3902980907","heartbeat_front_addr":"192.168.123.105:6805/3902980907","state":["exists","up"]},{"osd":5,"uuid":"4cf77df5-c5c7-4d20-b47b-ed4598f3fb56","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":29,"up_thru":30,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6809","nonce":3731608217}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6811","nonce":3731608217}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6815","nonce":3731608217}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6813","nonce":3731608217}]},"public_addr":"192.168.123.105:6809/3731608217","cluster_addr":"192.168.123.105:6811/3731608217","heartbeat_back_addr":"192.168.123.105:6815/3731608217","heartbeat_front_addr":"192.168.123.105:6813/3731608217","state":["exists","up"]},{"osd":6,"uuid":"d5098a9b-8d57-4249-b546-8ac52d23059a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":34,"up_thru":35,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6817","nonce":576011160}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6819","nonce":576011160}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6823","nonce":576011160}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6821","nonce":576011160}]},"public_addr":"192.168.123.105:6817/576011160","cluster_addr":"192.168.123.105:6819/576011160","heartbeat_back_addr":"192.168.123.105:6823/576011160","heartbeat_front_addr":"192.168.123.105:6821/576011160","state":["exists","up"]},{"osd":7,"uuid":"abb08ecf-81bc-4ddf-8ab4-1c14d5086c97","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6825","nonce":3692745270}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6827","nonce":3692745270}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6831","nonce":3692745270}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6829","nonce":3692745270}]},"public_addr":"192.168.123.105:6825/3692745270","cluster_addr":"192.168.123.105:6827/3692745270","he
artbeat_back_addr":"192.168.123.105:6831/3692745270","heartbeat_front_addr":"192.168.123.105:6829/3692745270","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:11.431026+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:18.850301+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:27.220686+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:37.296305+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:44.191271+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:52.476146+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:58:01.062278+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:58:09.862825+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.102:0/2777838814":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/1263130046":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6801/1096882102":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6800/1096882102":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/2455034139":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6800/3034129265":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/3993166382":"2026-03-11T06:56:27.474743+0000","192.168.123.102:6801/3034129265":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/370820550":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/1375953868":"2026-03-11T06:56:27.474743+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T06:58:12.822 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T06:57:29.589386+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 
'target_version': "0'0"}, 'last_change': '18', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-10T06:58:12.822 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd pool get .mgr pg_num 2026-03-10T06:58:12.979 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:13.314 INFO:teuthology.orchestra.run.vm02.stdout:pg_num: 1 2026-03-10T06:58:13.366 INFO:tasks.cephadm:Adding prometheus.a on vm05 2026-03-10T06:58:13.366 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch apply prometheus '1;vm05=a' 2026-03-10T06:58:13.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:13 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3552644736' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:13.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:13 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2797396048' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T06:58:13.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:13 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3552644736' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:13.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:13 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2797396048' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T06:58:13.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:13 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3552644736' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:13.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:13 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2797396048' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-10T06:58:14.046 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled prometheus update... 
2026-03-10T06:58:14.121 DEBUG:teuthology.orchestra.run.vm05:prometheus.a> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a.service 2026-03-10T06:58:14.123 INFO:tasks.cephadm:Adding node-exporter.a on vm02 2026-03-10T06:58:14.123 INFO:tasks.cephadm:Adding node-exporter.b on vm05 2026-03-10T06:58:14.123 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch apply node-exporter '2;vm02=a;vm05=b' 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='client.24287 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: Saving service prometheus spec with placement vm05=a;count:1 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:14.657 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:14 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T06:58:14.657 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled node-exporter update... 
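
Each monitoring service is applied individually with a pinned placement string ('1;vm05=a', which the mgr records as "placement vm05=a;count:1"), and the prometheus mgr module is enabled by the orchestrator in response to the prometheus spec, as the mon entries above show. A rough sketch of the same apply sequence plus a basic verification step, assuming the ceph commands run inside cephadm shell on a bootstrapped host and that the mgr prometheus module listens on its default port 9283 (the "ENGINE Serving on http://:::9283" lines later in this log); the curl target is the active mgr host in this run:

    #!/usr/bin/env bash
    # Schedule the monitoring daemons with explicit placements (the same
    # specs this run applies), then confirm what the orchestrator deployed.
    set -euo pipefail

    ceph orch apply prometheus '1;vm05=a'
    ceph orch apply node-exporter '2;vm02=a;vm05=b'
    ceph orch apply alertmanager '1;vm02=a'
    ceph orch apply grafana '1;vm05=a'

    # Show the saved service specs and the daemons cephadm has started so far.
    ceph orch ls
    ceph orch ps

    # The prometheus mgr module serves metrics at /metrics on the active mgr
    # (vm02 / 192.168.123.102 in this run); curl it to confirm it is up.
    curl -s http://192.168.123.102:9283/metrics | head
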
2026-03-10T06:58:14.735 DEBUG:teuthology.orchestra.run.vm02:node-exporter.a> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service 2026-03-10T06:58:14.737 DEBUG:teuthology.orchestra.run.vm05:node-exporter.b> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service 2026-03-10T06:58:14.739 INFO:tasks.cephadm:Adding alertmanager.a on vm02 2026-03-10T06:58:14.739 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch apply alertmanager '1;vm02=a' 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='client.24287 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: Saving service prometheus spec with placement vm05=a;count:1 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: osdmap e42: 8 total, 8 up, 8 in 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='client.24287 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: Saving service prometheus spec with placement vm05=a;count:1 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='mgr.14152 
192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:14.761 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:14 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-10T06:58:15.240 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ignoring --setuser ceph since I am not root 2026-03-10T06:58:15.240 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ignoring --setgroup ceph since I am not root 2026-03-10T06:58:15.240 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:15.239+0000 7fbd32e28000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T06:58:15.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:15.307+0000 7fbd32e28000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T06:58:15.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:15.318+0000 7f10dad69000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T06:58:16.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:15.705+0000 7fbd32e28000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T06:58:16.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:15 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:16.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:15 vm05 ceph-mon[48591]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T06:58:16.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:15 vm05 ceph-mon[48591]: mgrmap e16: y(active, since 97s), standbys: x 2026-03-10T06:58:16.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:15 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:16.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:15 vm02 ceph-mon[54377]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T06:58:16.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:15 vm02 ceph-mon[54377]: mgrmap e16: y(active, since 97s), standbys: x 2026-03-10T06:58:16.085 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:15 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' 2026-03-10T06:58:16.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:15 vm02 ceph-mon[50158]: from='mgr.14152 192.168.123.102:0/3955812520' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-10T06:58:16.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:15 vm02 ceph-mon[50158]: mgrmap e16: y(active, since 97s), standbys: x 2026-03-10T06:58:16.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:15.746+0000 7f10dad69000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T06:58:16.430 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:16.043+0000 7fbd32e28000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T06:58:16.430 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:16.190+0000 7fbd32e28000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T06:58:16.430 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:16.246+0000 7fbd32e28000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T06:58:16.534 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:16 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:16.138+0000 7f10dad69000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T06:58:16.534 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:16 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:16.292+0000 7f10dad69000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T06:58:16.534 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:16 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:16.350+0000 7f10dad69000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T06:58:16.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:16.430+0000 7fbd32e28000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T06:58:16.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:16 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:16.533+0000 7f10dad69000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T06:58:17.319 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.053+0000 7fbd32e28000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T06:58:17.319 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.257+0000 7fbd32e28000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T06:58:17.445 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.183+0000 7f10dad69000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T06:58:17.445 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.378+0000 7f10dad69000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T06:58:17.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.319+0000 7fbd32e28000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T06:58:17.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.377+0000 7fbd32e28000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T06:58:17.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.442+0000 7fbd32e28000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T06:58:17.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.500+0000 7fbd32e28000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T06:58:17.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.444+0000 7f10dad69000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T06:58:17.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.501+0000 7f10dad69000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T06:58:17.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.571+0000 7f10dad69000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T06:58:17.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.629+0000 7f10dad69000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T06:58:18.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.824+0000 7fbd32e28000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T06:58:18.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:17.902+0000 7fbd32e28000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T06:58:18.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:17.945+0000 7f10dad69000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T06:58:18.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:18.020+0000 7f10dad69000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T06:58:18.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.471+0000 7fbd32e28000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T06:58:18.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.532+0000 7fbd32e28000 -1 mgr[py] Module selftest has missing 
NOTIFY_TYPES member 2026-03-10T06:58:18.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.596+0000 7fbd32e28000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T06:58:18.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.719+0000 7fbd32e28000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T06:58:18.849 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:18.593+0000 7f10dad69000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T06:58:18.849 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:18.656+0000 7f10dad69000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T06:58:18.849 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:18.722+0000 7f10dad69000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T06:58:19.107 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:18.848+0000 7f10dad69000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T06:58:19.107 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:18.912+0000 7f10dad69000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T06:58:19.107 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:19 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:19.012+0000 7f10dad69000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T06:58:19.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.780+0000 7fbd32e28000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T06:58:19.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.877+0000 7fbd32e28000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T06:58:19.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:18.966+0000 7fbd32e28000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T06:58:19.439 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:19 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:19.106+0000 7f10dad69000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:19.295+0000 7fbd32e28000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:19.355+0000 7fbd32e28000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 
[10/Mar/2026:06:58:19] ENGINE Bus STARTING 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: CherryPy Checker: 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: The Application mounted at '' has an empty config. 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: [10/Mar/2026:06:58:19] ENGINE Serving on http://:::9283 2026-03-10T06:58:19.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: [10/Mar/2026:06:58:19] ENGINE Bus STARTED 2026-03-10T06:58:19.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:19 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:19.438+0000 7f10dad69000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T06:58:19.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:19 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:19.499+0000 7f10dad69000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T06:58:20.448 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: Standby manager daemon x restarted 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: Standby manager daemon x started 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: Active manager daemon y restarted 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: Activating manager daemon y 2026-03-10T06:58:20.449 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:20 vm05 ceph-mon[48591]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: Standby manager daemon x restarted 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: Standby manager daemon x started 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: Active manager daemon y restarted 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: Activating manager daemon y 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[54377]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:06:58:20] ENGINE Bus STARTING 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: CherryPy Checker: 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: The Application mounted at '' has an empty config. 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:06:58:20] ENGINE Serving on http://:::9283 2026-03-10T06:58:20.452 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:06:58:20] ENGINE Bus STARTED 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: Standby manager daemon x restarted 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: Standby manager daemon x started 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: from='mgr.? 
192.168.123.105:0/1165118000' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: Active manager daemon y restarted 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: Activating manager daemon y 2026-03-10T06:58:20.453 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:20 vm02 ceph-mon[50158]: osdmap e43: 8 total, 8 up, 8 in 2026-03-10T06:58:21.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:06:58:20] ENGINE Bus STARTING 2026-03-10T06:58:21.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:06:58:20] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T06:58:21.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:06:58:20] ENGINE Bus STARTED 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: mgrmap e17: y(active, starting, since 0.706596s), standbys: x 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' 
entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: Manager daemon y is now available 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: [10/Mar/2026:06:58:20] ENGINE Bus STARTING 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: [10/Mar/2026:06:58:20] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: [10/Mar/2026:06:58:20] ENGINE Bus STARTED 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 
vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.256 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled alertmanager update... 2026-03-10T06:58:21.307 DEBUG:teuthology.orchestra.run.vm02:alertmanager.a> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@alertmanager.a.service 2026-03-10T06:58:21.308 INFO:tasks.cephadm:Adding grafana.a on vm05 2026-03-10T06:58:21.308 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph orch apply grafana '1;vm05=a' 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: mgrmap e17: y(active, starting, since 0.706596s), standbys: x 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: Manager daemon y is now available 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: [10/Mar/2026:06:58:20] ENGINE Bus STARTING 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: [10/Mar/2026:06:58:20] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: [10/Mar/2026:06:58:20] ENGINE Bus STARTED 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' 
entity='mgr.y' 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: mgrmap e17: y(active, starting, since 0.706596s), standbys: x 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T06:58:21.408 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' 
entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: Manager daemon y is now available 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: [10/Mar/2026:06:58:20] ENGINE Bus STARTING 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: [10/Mar/2026:06:58:20] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: [10/Mar/2026:06:58:20] ENGINE Bus STARTED 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.409 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:21.854 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled grafana update... 2026-03-10T06:58:21.915 DEBUG:teuthology.orchestra.run.vm05:grafana.a> sudo journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@grafana.a.service 2026-03-10T06:58:21.917 INFO:tasks.cephadm:Setting up client nodes... 
2026-03-10T06:58:21.917 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: mgrmap e18: y(active, since 1.71546s), standbys: x 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='client.24308 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm02=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Saving service alertmanager spec with placement vm02=a;count:1 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Adjusting osd_memory_target on vm05 to 65802k 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Unable to set osd_memory_target on vm05 to 67381964: error parsing value: Value '67381964' is below minimum 939524096 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 
ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='client.14445 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Saving service grafana spec with placement vm05=a;count:1 2026-03-10T06:58:22.492 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.493 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.493 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.493 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.493 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[54377]: Deploying daemon node-exporter.a on vm02 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: mgrmap e18: y(active, since 1.71546s), standbys: x 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='client.24308 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm02=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Saving service alertmanager spec with placement vm02=a;count:1 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.494 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Adjusting osd_memory_target on vm05 to 65802k 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Unable to set osd_memory_target on vm05 to 67381964: error parsing value: Value '67381964' is below minimum 939524096 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.494 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='client.14445 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Saving service grafana spec with placement vm05=a;count:1 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.495 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:22 vm02 ceph-mon[50158]: Deploying daemon node-exporter.a on vm02 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: mgrmap e18: y(active, since 1.71546s), standbys: x 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='client.24308 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm02=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Saving service alertmanager spec with placement vm02=a;count:1 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 
06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Adjusting osd_memory_target on vm05 to 65802k 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Unable to set osd_memory_target on vm05 to 67381964: error parsing value: Value '67381964' is below minimum 939524096 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='client.14445 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Saving service grafana spec with placement vm05=a;count:1 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:22.504 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:22 vm05 ceph-mon[48591]: Deploying daemon node-exporter.a on vm02 2026-03-10T06:58:22.549 INFO:teuthology.orchestra.run.vm02.stdout:[client.0] 2026-03-10T06:58:22.549 INFO:teuthology.orchestra.run.vm02.stdout: key = AQAOwa9p5IckIBAApnaWwTJMX/bmJu7bk+GArA== 2026-03-10T06:58:22.633 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-10T06:58:22.633 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-10T06:58:22.633 DEBUG:teuthology.orchestra.run.vm02:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-10T06:58:22.783 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-10T06:58:23.302 INFO:teuthology.orchestra.run.vm05.stdout:[client.1] 2026-03-10T06:58:23.302 INFO:teuthology.orchestra.run.vm05.stdout: key = AQAPwa9pP+G1ERAArChSeGMQvTKrM8nnw78Frg== 2026-03-10T06:58:23.335 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:22 vm02 bash[68618]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 2026-03-10T06:58:23.348 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-10T06:58:23.348 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-10T06:58:23.348 DEBUG:teuthology.orchestra.run.vm05:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-10T06:58:23.387 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-10T06:58:23.387 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-10T06:58:23.387 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph mgr dump --format=json 2026-03-10T06:58:23.544 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:23.664 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3420309085' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: from='client.? 
192.168.123.105:0/3300779447' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.665 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3420309085' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: from='client.? 192.168.123.105:0/3300779447' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.666 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:23 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/3420309085' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: from='client.? 192.168.123.105:0/3300779447' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T06:58:23.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:23 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T06:58:23.971 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:24.022 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":19,"active_gid":14415,"active_name":"y","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6800","nonce":596000248},{"type":"v1","addr":"192.168.123.102:6801","nonce":596000248}]},"active_addr":"192.168.123.102:6801/596000248","active_change":"2026-03-10T06:58:19.502669+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":24299,"name":"x","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the 
number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of 
cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.102:8443/","prometheus":"http://192.168.123.102:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":43,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":2121258844}]},{"addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":2765940575}]},{"addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":2782992750}]},{"addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":1681950952}]}]}} 2026-03-10T06:58:24.023 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-10T06:58:24.024 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-10T06:58:24.024 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd dump --format=json 2026-03-10T06:58:24.172 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:24.228 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: Getting image source signatures 2026-03-10T06:58:24.228 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-10T06:58:24.228 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-10T06:58:24.228 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-10T06:58:24.532 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:24.532 
INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":43,"fsid":"28bd35e6-1c4e-11f1-9057-21b3549603fc","created":"2026-03-10T06:56:12.122649+0000","modified":"2026-03-10T06:58:19.501901+0000","last_up_change":"2026-03-10T06:58:11.385881+0000","last_in_change":"2026-03-10T06:58:02.296504+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T06:57:29.589386+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"35204e1a-6579-424a-9923-9986832c655c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6802","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6803","nonce":1562453044}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6804","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6805","nonce":1562453044}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6808","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6809","nonce":1562453044}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6806","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6807","nonce":1562453044}]},"public_addr":"192.168.123.102:6803/1562453044","cluster_addr":"192.168.123.102:6805/1562453044","heartbeat_back_addr":"192.168.123.102:6809/1562453044","heartbeat_front_addr":"192.168.123.102:6807/1562453044","state":["exists","up"]},{"osd":1,"uuid":"eb30f5d3-2080-416b-b174-8350b4a9f19d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":25,"down_at":0,"lost_at
":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6810","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6811","nonce":2672109890}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6812","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6813","nonce":2672109890}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6816","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6817","nonce":2672109890}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6814","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6815","nonce":2672109890}]},"public_addr":"192.168.123.102:6811/2672109890","cluster_addr":"192.168.123.102:6813/2672109890","heartbeat_back_addr":"192.168.123.102:6817/2672109890","heartbeat_front_addr":"192.168.123.102:6815/2672109890","state":["exists","up"]},{"osd":2,"uuid":"dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":14,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6818","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6819","nonce":3047564050}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6820","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6821","nonce":3047564050}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6824","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6825","nonce":3047564050}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6822","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6823","nonce":3047564050}]},"public_addr":"192.168.123.102:6819/3047564050","cluster_addr":"192.168.123.102:6821/3047564050","heartbeat_back_addr":"192.168.123.102:6825/3047564050","heartbeat_front_addr":"192.168.123.102:6823/3047564050","state":["exists","up"]},{"osd":3,"uuid":"3a40e513-6ad9-43f1-ba74-0b37d785fad9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6826","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6827","nonce":3907781773}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6828","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6829","nonce":3907781773}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6832","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6833","nonce":3907781773}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6830","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6831","nonce":3907781773}]},"public_addr":"192.168.123.102:6827/3907781773","cluster_addr":"192.168.123.102:6829/3907781773","heartbeat_back_addr":"192.168.123.102:6833/3907781773","heartbeat_front_addr":"192.168.123.102:6831/3907781773","state":["exists","up"]},{"osd":4,"uuid":"fa4107e7-dcd8-4c34-a994-7706880ac944","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6801","nonce":3902980907}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6803","nonce":3902980907}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","ad
dr":"192.168.123.105:6806","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6807","nonce":3902980907}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6805","nonce":3902980907}]},"public_addr":"192.168.123.105:6801/3902980907","cluster_addr":"192.168.123.105:6803/3902980907","heartbeat_back_addr":"192.168.123.105:6807/3902980907","heartbeat_front_addr":"192.168.123.105:6805/3902980907","state":["exists","up"]},{"osd":5,"uuid":"4cf77df5-c5c7-4d20-b47b-ed4598f3fb56","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":29,"up_thru":30,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6809","nonce":3731608217}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6811","nonce":3731608217}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6815","nonce":3731608217}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6813","nonce":3731608217}]},"public_addr":"192.168.123.105:6809/3731608217","cluster_addr":"192.168.123.105:6811/3731608217","heartbeat_back_addr":"192.168.123.105:6815/3731608217","heartbeat_front_addr":"192.168.123.105:6813/3731608217","state":["exists","up"]},{"osd":6,"uuid":"d5098a9b-8d57-4249-b546-8ac52d23059a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":34,"up_thru":35,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6817","nonce":576011160}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6819","nonce":576011160}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6823","nonce":576011160}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6821","nonce":576011160}]},"public_addr":"192.168.123.105:6817/576011160","cluster_addr":"192.168.123.105:6819/576011160","heartbeat_back_addr":"192.168.123.105:6823/576011160","heartbeat_front_addr":"192.168.123.105:6821/576011160","state":["exists","up"]},{"osd":7,"uuid":"abb08ecf-81bc-4ddf-8ab4-1c14d5086c97","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6825","nonce":3692745270}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6827","nonce":3692745270}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6831","nonce":3692745270}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6829","nonce":3692745270}]},"public_addr":"192.168.123.105:6825/3692745270","cluster_addr":"192.168.123.105:6827/3692745270","h
eartbeat_back_addr":"192.168.123.105:6831/3692745270","heartbeat_front_addr":"192.168.123.105:6829/3692745270","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:11.431026+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:18.850301+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:27.220686+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:37.296305+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:44.191271+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:52.476146+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:58:01.062278+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:58:09.862825+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.102:6801/301535323":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/2239439801":"2026-03-11T06:58:19.501876+0000","192.168.123.102:6800/301535323":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/2777838814":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/1263130046":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/4171767778":"2026-03-11T06:58:19.501876+0000","192.168.123.102:6801/1096882102":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6800/1096882102":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/2455034139":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6800/3034129265":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/3993166382":"2026-03-11T06:56:27.474743+0000","192.168.123.102:6801/3034129265":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/2512740709":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/370820550":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/1019363220":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/1375953868":"2026-03-11T06:56:27.474743+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T06:58:24.611 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
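[editor's note] The "waiting for all up" / "all up!" messages above come from the test harness polling `ceph osd dump --format=json` (run through the cephadm shell, as in the DEBUG line) until every OSD reports up and in. The following is a minimal illustrative sketch of that check, not teuthology's actual implementation; the fsid, image, and JSON field names (max_osd, osds[].up, osds[].in) are taken from the dump logged above, while the helper names and the poll interval are arbitrary.

#!/usr/bin/env python3
# Sketch: poll `ceph osd dump --format=json` via the cephadm shell until all OSDs are up/in.
import json
import subprocess
import time

FSID = "28bd35e6-1c4e-11f1-9057-21b3549603fc"   # fsid as logged above
IMAGE = "quay.io/ceph/ceph:v17.2.0"             # bootstrap image as logged above

def osd_dump():
    # Mirrors the logged DEBUG command line; returns the parsed JSON dump.
    out = subprocess.check_output([
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
        "shell", "--fsid", FSID, "--",
        "ceph", "osd", "dump", "--format=json",
    ])
    return json.loads(out)

def all_up_and_in(dump):
    # True when every created OSD exists and reports up=1 and in=1.
    osds = dump["osds"]
    return (len(osds) == dump["max_osd"]
            and all(o["up"] == 1 and o["in"] == 1 for o in osds))

if __name__ == "__main__":
    while not all_up_and_in(osd_dump()):
        time.sleep(3)   # poll interval chosen arbitrarily for this sketch
    print("all up!")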
2026-03-10T06:58:24.611 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd dump --format=json 2026-03-10T06:58:24.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:24 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/674093890' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T06:58:24.814 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:24.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:24 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/674093890' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T06:58:24.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:24 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/674093890' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.213 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:25.213 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":43,"fsid":"28bd35e6-1c4e-11f1-9057-21b3549603fc","created":"2026-03-10T06:56:12.122649+0000","modified":"2026-03-10T06:58:19.501901+0000","last_up_change":"2026-03-10T06:58:11.385881+0000","last_in_change":"2026-03-10T06:58:02.296504+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T06:57:29.589386+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"18","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"35204e1a-6579-424a-9923-9986832c655c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_
clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6802","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6803","nonce":1562453044}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6804","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6805","nonce":1562453044}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6808","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6809","nonce":1562453044}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6806","nonce":1562453044},{"type":"v1","addr":"192.168.123.102:6807","nonce":1562453044}]},"public_addr":"192.168.123.102:6803/1562453044","cluster_addr":"192.168.123.102:6805/1562453044","heartbeat_back_addr":"192.168.123.102:6809/1562453044","heartbeat_front_addr":"192.168.123.102:6807/1562453044","state":["exists","up"]},{"osd":1,"uuid":"eb30f5d3-2080-416b-b174-8350b4a9f19d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":25,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6810","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6811","nonce":2672109890}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6812","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6813","nonce":2672109890}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6816","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6817","nonce":2672109890}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6814","nonce":2672109890},{"type":"v1","addr":"192.168.123.102:6815","nonce":2672109890}]},"public_addr":"192.168.123.102:6811/2672109890","cluster_addr":"192.168.123.102:6813/2672109890","heartbeat_back_addr":"192.168.123.102:6817/2672109890","heartbeat_front_addr":"192.168.123.102:6815/2672109890","state":["exists","up"]},{"osd":2,"uuid":"dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":14,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6818","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6819","nonce":3047564050}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6820","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6821","nonce":3047564050}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6824","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6825","nonce":3047564050}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6822","nonce":3047564050},{"type":"v1","addr":"192.168.123.102:6823","nonce":3047564050}]},"public_addr":"192.168.123.102:6819/3047564050","cluster_addr":"192.168.123.102:6821/3047564050","heartbeat_back_addr":"192.168.123.102:6825/3047564050","heartbeat_front_addr":"192.168.123.102:6823/3047564050","state":["exists","up"]},{"osd":3,"uuid":"3a40e513-6ad9-43f1-ba74-0b37d785fad9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6826","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6827","nonce":3907781773}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6828","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6829","nonce":390778
1773}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6832","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6833","nonce":3907781773}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6830","nonce":3907781773},{"type":"v1","addr":"192.168.123.102:6831","nonce":3907781773}]},"public_addr":"192.168.123.102:6827/3907781773","cluster_addr":"192.168.123.102:6829/3907781773","heartbeat_back_addr":"192.168.123.102:6833/3907781773","heartbeat_front_addr":"192.168.123.102:6831/3907781773","state":["exists","up"]},{"osd":4,"uuid":"fa4107e7-dcd8-4c34-a994-7706880ac944","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":24,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6801","nonce":3902980907}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6803","nonce":3902980907}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6807","nonce":3902980907}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":3902980907},{"type":"v1","addr":"192.168.123.105:6805","nonce":3902980907}]},"public_addr":"192.168.123.105:6801/3902980907","cluster_addr":"192.168.123.105:6803/3902980907","heartbeat_back_addr":"192.168.123.105:6807/3902980907","heartbeat_front_addr":"192.168.123.105:6805/3902980907","state":["exists","up"]},{"osd":5,"uuid":"4cf77df5-c5c7-4d20-b47b-ed4598f3fb56","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":29,"up_thru":30,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6809","nonce":3731608217}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6811","nonce":3731608217}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6815","nonce":3731608217}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":3731608217},{"type":"v1","addr":"192.168.123.105:6813","nonce":3731608217}]},"public_addr":"192.168.123.105:6809/3731608217","cluster_addr":"192.168.123.105:6811/3731608217","heartbeat_back_addr":"192.168.123.105:6815/3731608217","heartbeat_front_addr":"192.168.123.105:6813/3731608217","state":["exists","up"]},{"osd":6,"uuid":"d5098a9b-8d57-4249-b546-8ac52d23059a","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":34,"up_thru":35,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6817","nonce":576011160}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6819","nonce":576011160}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6823","nonce":576011160}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":576011160},{"type":"v1","addr":"192.168.123.105:6821","nonce":576011160}]},"public_addr":"192.168.123.105:6817/5
76011160","cluster_addr":"192.168.123.105:6819/576011160","heartbeat_back_addr":"192.168.123.105:6823/576011160","heartbeat_front_addr":"192.168.123.105:6821/576011160","state":["exists","up"]},{"osd":7,"uuid":"abb08ecf-81bc-4ddf-8ab4-1c14d5086c97","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6825","nonce":3692745270}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6827","nonce":3692745270}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6831","nonce":3692745270}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":3692745270},{"type":"v1","addr":"192.168.123.105:6829","nonce":3692745270}]},"public_addr":"192.168.123.105:6825/3692745270","cluster_addr":"192.168.123.105:6827/3692745270","heartbeat_back_addr":"192.168.123.105:6831/3692745270","heartbeat_front_addr":"192.168.123.105:6829/3692745270","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:11.431026+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:18.850301+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:27.220686+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:37.296305+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:44.191271+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:57:52.476146+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:58:01.062278+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T06:58:09.862825+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.102:6801/301535323":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/2239439801":"2026-03-11T06:58:19.501876+0000","192.168.123.102:6800/301535323":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/2777838814":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/1263130046":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/4171767778":"2026-03-11T06:58:19.501876+0000","192.168.123.102:6801/1096882102":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6800/1096882102":"2026-03-11T06:56:37.324419+0000","192.168.123.102:0/2455034139":"2026-03-11T06:56:37.324419+0000","192.168.123.102:6800/3034129265":"2026-0
3-11T06:56:27.474743+0000","192.168.123.102:0/3993166382":"2026-03-11T06:56:27.474743+0000","192.168.123.102:6801/3034129265":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/2512740709":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/370820550":"2026-03-11T06:56:27.474743+0000","192.168.123.102:0/1019363220":"2026-03-11T06:58:19.501876+0000","192.168.123.102:0/1375953868":"2026-03-11T06:56:27.474743+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T06:58:25.213 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-10T06:58:25.213 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: Writing manifest to image destination 2026-03-10T06:58:25.213 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 podman[68618]: 2026-03-10 06:58:24.965556881 +0000 UTC m=+2.105609166 container create 79b6fdafea6ad96214035972a09354bc27a3d556bc56cc058156a03f7f9f6ce4 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:25.213 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 podman[68618]: 2026-03-10 06:58:24.959008073 +0000 UTC m=+2.099060368 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 podman[68618]: 2026-03-10 06:58:24.990437553 +0000 UTC m=+2.130489838 container init 79b6fdafea6ad96214035972a09354bc27a3d556bc56cc058156a03f7f9f6ce4 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 podman[68618]: 2026-03-10 06:58:24.993516165 +0000 UTC m=+2.133568450 container start 79b6fdafea6ad96214035972a09354bc27a3d556bc56cc058156a03f7f9f6ce4 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 bash[68618]: 79b6fdafea6ad96214035972a09354bc27a3d556bc56cc058156a03f7f9f6ce4 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:24 vm02 systemd[1]: Started Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
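[editor's note] At this point systemd reports the node-exporter.a container started on vm02. A quick way to confirm the monitoring endpoints are actually serving is to probe them over HTTP, as in the sketch below. The prometheus mgr URL (192.168.123.102:9283) is taken from the "services" section of the mgr dump earlier in this log; the node-exporter port 9100 is an assumption (its usual default) and does not appear in the log itself.

#!/usr/bin/env python3
# Sketch: probe the monitoring HTTP endpoints after the daemons report started.
import urllib.request

ENDPOINTS = [
    "http://192.168.123.102:9283/metrics",   # prometheus mgr module (from the mgr dump "services" entry)
    "http://192.168.123.102:9100/metrics",   # node-exporter.a on vm02 (assumed default port, not in the log)
]

for url in ENDPOINTS:
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            body = resp.read().decode("utf-8", "replace")
            print(f"{url}: HTTP {resp.status}, {len(body.splitlines())} lines of metrics")
    except OSError as exc:
        # urllib.error.URLError is an OSError subclass, so connection failures land here.
        print(f"{url}: unreachable ({exc})")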
2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.001Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.001Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=arp 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-10T06:58:25.214 
INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=edac 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=os 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=stat 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 
level=info collector=textfile 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=time 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=uname 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.002Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-10T06:58:25.214 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[68996]: ts=2026-03-10T06:58:25.003Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-10T06:58:25.271 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.0 flush_pg_stats 2026-03-10T06:58:25.271 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.1 flush_pg_stats 2026-03-10T06:58:25.271 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.2 flush_pg_stats 2026-03-10T06:58:25.271 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.3 flush_pg_stats 2026-03-10T06:58:25.272 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.4 flush_pg_stats 2026-03-10T06:58:25.272 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.5 flush_pg_stats 2026-03-10T06:58:25.272 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.6 flush_pg_stats 2026-03-10T06:58:25.272 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph tell osd.7 flush_pg_stats 2026-03-10T06:58:25.493 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[50158]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:25.689 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:25 vm05 ceph-mon[48591]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:25.689 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:25 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3254901254' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.689 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:25 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:25.689 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:25 vm05 ceph-mon[48591]: Deploying daemon node-exporter.b on vm05 2026-03-10T06:58:25.689 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:25 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/654754624' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.689 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:25 vm05 systemd[1]: Starting Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T06:58:25.813 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:25.823 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/3254901254' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[50158]: Deploying daemon node-exporter.b on vm05 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/654754624' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[54377]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3254901254' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[54377]: Deploying daemon node-exporter.b on vm05 2026-03-10T06:58:25.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:25 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/654754624' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T06:58:25.845 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:25.913 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:25.932 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:26.003 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:25 vm05 bash[62924]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 
2026-03-10T06:58:26.261 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:26.261 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:26.264 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:26.858 INFO:teuthology.orchestra.run.vm02.stdout:34359738384 2026-03-10T06:58:26.858 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.0 2026-03-10T06:58:26.922 INFO:teuthology.orchestra.run.vm02.stdout:103079215113 2026-03-10T06:58:26.922 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.4 2026-03-10T06:58:27.047 INFO:teuthology.orchestra.run.vm02.stdout:60129542157 2026-03-10T06:58:27.047 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.2 2026-03-10T06:58:27.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:26 vm05 bash[62924]: Getting image source signatures 2026-03-10T06:58:27.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:26 vm05 bash[62924]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-10T06:58:27.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:26 vm05 bash[62924]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-10T06:58:27.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:26 vm05 bash[62924]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-10T06:58:27.532 INFO:teuthology.orchestra.run.vm02.stdout:171798691844 2026-03-10T06:58:27.532 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.7 2026-03-10T06:58:27.723 INFO:teuthology.orchestra.run.vm02.stdout:124554051592 2026-03-10T06:58:27.724 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.5 2026-03-10T06:58:27.725 INFO:teuthology.orchestra.run.vm02.stdout:47244640270 2026-03-10T06:58:27.725 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.1 2026-03-10T06:58:27.744 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:27.748 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:27 vm02 ceph-mon[54377]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:27.749 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:27.749 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:27 vm02 ceph-mon[50158]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 
160 GiB avail 2026-03-10T06:58:27.750 INFO:teuthology.orchestra.run.vm02.stdout:146028888070 2026-03-10T06:58:27.750 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.6 2026-03-10T06:58:27.756 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-mon[48591]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 bash[62924]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 bash[62924]: Writing manifest to image destination 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 podman[62924]: 2026-03-10 06:58:27.526001699 +0000 UTC m=+1.846781506 container create 4912deba3ea04b6574b25f06d3c8c5646518dcaeb67f5ecd6ac107b510c7c9e3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 podman[62924]: 2026-03-10 06:58:27.553531085 +0000 UTC m=+1.874310892 container init 4912deba3ea04b6574b25f06d3c8c5646518dcaeb67f5ecd6ac107b510c7c9e3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 podman[62924]: 2026-03-10 06:58:27.556067933 +0000 UTC m=+1.876847740 container start 4912deba3ea04b6574b25f06d3c8c5646518dcaeb67f5ecd6ac107b510c7c9e3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 bash[62924]: 4912deba3ea04b6574b25f06d3c8c5646518dcaeb67f5ecd6ac107b510c7c9e3 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 podman[62924]: 2026-03-10 06:58:27.519861287 +0000 UTC m=+1.840641084 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.560Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.560Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.560Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-10T06:58:27.756 
INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.560Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=arp 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 systemd[1]: Started Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=edac 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z 
caller=node_exporter.go:115 level=info collector=entropy 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-10T06:58:27.756 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-10T06:58:27.757 
INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=os 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=stat 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=time 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=uname 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-10T06:58:27.757 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 06:58:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[62980]: ts=2026-03-10T06:58:27.561Z caller=tls_config.go:195 level=info msg="TLS is disabled." http2=false 2026-03-10T06:58:27.764 INFO:teuthology.orchestra.run.vm02.stdout:90194313227 2026-03-10T06:58:27.764 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph osd last-stat-seq osd.3 2026-03-10T06:58:28.007 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:28.513 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:28.605 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:28.611 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:28 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:28.611 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:28 vm02 ceph-mon[50158]: Deploying daemon prometheus.a on vm05 2026-03-10T06:58:28.611 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:28 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:28.611 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:28 vm02 ceph-mon[54377]: Deploying daemon prometheus.a on vm05 2026-03-10T06:58:28.626 INFO:teuthology.orchestra.run.vm02.stdout:103079215113 2026-03-10T06:58:28.816 INFO:teuthology.orchestra.run.vm02.stdout:60129542157 2026-03-10T06:58:28.826 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:28.854 INFO:tasks.cephadm.ceph_manager.ceph:need seq 103079215113 got 103079215113 for osd.4 2026-03-10T06:58:28.854 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:28.997 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:29.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:28 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:29.003 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:28 vm05 ceph-mon[48591]: Deploying daemon prometheus.a on vm05 2026-03-10T06:58:29.094 INFO:teuthology.orchestra.run.vm02.stdout:34359738384 2026-03-10T06:58:29.120 INFO:tasks.cephadm.ceph_manager.ceph:need seq 60129542157 got 60129542157 for osd.2 2026-03-10T06:58:29.120 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:29.236 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:29.327 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738384 got 34359738384 for osd.0 2026-03-10T06:58:29.327 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:29.498 INFO:teuthology.orchestra.run.vm02.stdout:171798691844 2026-03-10T06:58:29.609 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[50158]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/919477783' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1692773266' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/679051709' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3463292293' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[54377]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/919477783' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1692773266' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/679051709' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T06:58:29.610 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:29 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/3463292293' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T06:58:29.680 INFO:tasks.cephadm.ceph_manager.ceph:need seq 171798691844 got 171798691844 for osd.7 2026-03-10T06:58:29.680 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:29.875 INFO:teuthology.orchestra.run.vm02.stdout:146028888070 2026-03-10T06:58:29.977 INFO:tasks.cephadm.ceph_manager.ceph:need seq 146028888070 got 146028888070 for osd.6 2026-03-10T06:58:29.977 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:29 vm05 ceph-mon[48591]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/919477783' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T06:58:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1692773266' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T06:58:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/679051709' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T06:58:30.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3463292293' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T06:58:30.013 INFO:teuthology.orchestra.run.vm02.stdout:124554051592 2026-03-10T06:58:30.016 INFO:teuthology.orchestra.run.vm02.stdout:47244640270 2026-03-10T06:58:30.035 INFO:teuthology.orchestra.run.vm02.stdout:90194313227 2026-03-10T06:58:30.136 INFO:tasks.cephadm.ceph_manager.ceph:need seq 47244640270 got 47244640270 for osd.1 2026-03-10T06:58:30.137 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:30.146 INFO:tasks.cephadm.ceph_manager.ceph:need seq 124554051592 got 124554051592 for osd.5 2026-03-10T06:58:30.146 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:30.146 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313227 got 90194313227 for osd.3 2026-03-10T06:58:30.146 DEBUG:teuthology.parallel:result is None 2026-03-10T06:58:30.147 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-10T06:58:30.147 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph pg dump --format=json 2026-03-10T06:58:30.357 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:30.708 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2208910469' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/8264284' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/1685657874' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3270293312' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2208910469' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/8264284' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1685657874' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3270293312' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T06:58:30.708 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:30 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:30.711 INFO:teuthology.orchestra.run.vm02.stderr:dumped all 2026-03-10T06:58:30.758 INFO:teuthology.orchestra.run.vm02.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-10T06:58:30.227506+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48716,"kb_used_data":4492,"kb_used_omap":0,"kb_used_meta":44160,"kb_avail":167690676,"statfs":{"total":171765137408,"available":171715252224,"internally_reserved":0,"allocated":4599808,"data_stored":2588608,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45219840},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming
":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.994127"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-10T06:58:20.328622+0000","last_change":"2026-03-10T06:58:13.844913+0000","last_active":"2026-03-10T06:58:20.328622+0000","last_peered":"2026-03-10T06:58:20.328622+0000","last_clean":"2026-03-10T06:58:20.328622+0000","last_became_active":"2026-03-10T06:58:13.420196+0000","last_became_peered":"2026-03-10T06:58:13.420196+0000","last_unstale":"2026-03-10T06:58:20.328622+0000","last_undegraded":"2026-03-10T06:58:20.328622+0000","last_fullsized":"2026-03-10T06:58:20.328622+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":16,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T06:57:30.167846+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T06:57:30.167846+0000","last_clean_scrub_stamp":"2026-03-10T06:57:30.167846+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T07:51:39.568951+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691844,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6060,"kb_used_data":804,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961364,"statfs":{"total":21470642176,"available":21464436736,"internally_reserved":0,"allocated":823296,"data_stored":572014,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56399999999999995}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61699999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73999999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59399999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65200000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55200000000000005}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66700000000000004}]}]},{"osd":6,"up_from":34,"seq":146028888070,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6124,"kb_used_data":804,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961300,"statfs":{"total":21470642176,"available":21464371200,"internally_reserved":0,"allocated":823296,"data_stored":572085,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84299999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70399999999999996}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68999999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59799999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61099999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73199999999999998}]}]},{"osd":1,"up_from":11,"seq":47244640270,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6312,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20961112,"statfs":{"total":21470642176,"available":21464178688,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 06:58:20 2026","interfaces":[{"interface":"back","average":{"1min":0.53100000000000003,"5min":0.53100000000000003,"15min":0.53100000000000003},"min":{"1min":0.23599999999999999,"5min":0.23599999999999999,"15min":0.23599999999999999},"max":{"1min":0.89100000000000001,"5min":0.89100000000000001,"15min":0.89100000000000001},"last":0.84599999999999997},{"interface":"front","average":{"1min":0.46100000000000002,"5min":0.46100000000000002,"15min":0.46100000000000002},"min":{"1min":0.23300000000000001,"5min":0.23300000000000001,"15min":0.23300000000000001},"max":{"1min":0.72899999999999998,"5min":0.72899999999999998,"15min":0.72899999999999998},"last":0.66400000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0149999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45200000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49099999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.503}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.752}]}]},{"osd":0,"up_from":8,"seq":34359738384,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6704,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960720,"statfs":{"total":21470642176,"available":21463777280,"internally_reserved":0,"allocated":827392,"data_stored":572329,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Tue Mar 10 06:58:21 2026","interfaces":[{"interface":"back","average":{"1min":0.58499999999999996,"5min":0.58499999999999996,"15min":0.58499999999999996},"min":{"1min":0.28399999999999997,"5min":0.28399999999999997,"15min":0.28399999999999997},"max":{"1min":0.97699999999999998,"5min":0.97699999999999998,"15min":0.97699999999999998},"last":0.32700000000000001},{"interface":"front","average":{"1min":0.60499999999999998,"5min":0.60499999999999998,"15min":0.60499999999999998},"min":{"1min":0.23599999999999999,"5min":0.23599999999999999,"15min":0.23599999999999999},"max":{"1min":1.073,"5min":1.073,"15min":1.073},"last":0.92500000000000004}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.90000000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79500000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97399999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86899999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78100000000000003}]}]},{"osd":2,"up_from":14,"seq":60129542157,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6248,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961176,"statfs":{"total":21470642176,"available":21464244224,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1020000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48799999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.503}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60199999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1859999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1140000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.085}]}]},{"osd":3,"up_from":21,"seq":90194313227,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5736,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961688,"statfs":{"total":21470642176,"available":21464768512,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.316}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53800000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59399999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.30499999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46800000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.377}]}]},{"osd":4,"up_from":24,"seq":103079215113,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5800,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961624,"statfs":{"total":21470642176,"available":21464702976,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54900000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71799999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.36499999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52500000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.378}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54100000000000004}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.5}]}]},{"osd":5,"up_from":29,"seq":124554051592,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":174224,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72399999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.747}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.90300000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77900000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63100000000000001}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56200000000000006}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72999999999999998}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T06:58:30.759 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph pg dump --format=json 2026-03-10T06:58:30.910 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 
2026-03-10T06:58:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:30 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2208910469' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T06:58:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:30 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/8264284' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T06:58:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:30 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1685657874' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T06:58:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:30 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3270293312' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T06:58:31.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:30 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:31.268 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:31.270 INFO:teuthology.orchestra.run.vm02.stderr:dumped all 2026-03-10T06:58:31.335 INFO:teuthology.orchestra.run.vm02.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-10T06:58:30.227506+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48716,"kb_used_data":4492,"kb_used_omap":0,"kb_used_meta":44160,"kb_avail":167690676,"statfs":{"total":171765137408,"available":171715252224,"internally_reserved":0,"allocated":4599808,"data_stored":2588608,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45219840},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced"
:0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.994127"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-10T06:58:20.328622+0000","last_change":"2026-03-10T06:58:13.844913+0000","last_active":"2026-03-10T06:58:20.328622+0000","last_peered":"2026-03-10T06:58:20.328622+0000","last_clean":"2026-03-10T06:58:20.328622+0000","last_became_active":"2026-03-10T06:58:13.420196+0000","last_became_peered":"2026-03-10T06:58:13.420196+0000","last_unstale":"2026-03-10T06:58:20.328622+0000","last_undegraded":"2026-03-10T06:58:20.328622+0000","last_fullsized":"2026-03-10T06:58:20.328622+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":16,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T06:57:30.167846+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T06:57:30.167846+0000","last_clean_scrub_stamp":"2026-03-10T06:57:30.167846+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T07:51:39.568951+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691844,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6060,"kb_used_data":804,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961364,"statfs":{"total":21470642176,"available":21464436736,"internally_reserved":0,"allocated":823296,"data_stored":572014,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56399999999999995}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61699999999999999}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73999999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59399999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65200000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55200000000000005}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66700000000000004}]}]},{"osd":6,"up_from":34,"seq":146028888070,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6124,"kb_used_data":804,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961300,"statfs":{"total":21470642176,"available":21464371200,"internally_reserved":0,"allocated":823296,"data_stored":572085,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.84299999999999997}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70399999999999996}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68999999999999995}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59799999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61099999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73199999999999998}]}]},{"osd":1,"up_from":11,"seq":47244640270,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6312,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20961112,"statfs":{"total":21470642176,"available":21464178688,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Tue Mar 10 06:58:20 2026","interfaces":[{"interface":"back","average":{"1min":0.53100000000000003,"5min":0.53100000000000003,"15min":0.53100000000000003},"min":{"1min":0.23599999999999999,"5min":0.23599999999999999,"15min":0.23599999999999999},"max":{"1min":0.89100000000000001,"5min":0.89100000000000001,"15min":0.89100000000000001},"last":0.84599999999999997},{"interface":"front","average":{"1min":0.46100000000000002,"5min":0.46100000000000002,"15min":0.46100000000000002},"min":{"1min":0.23300000000000001,"5min":0.23300000000000001,"15min":0.23300000000000001},"max":{"1min":0.72899999999999998,"5min":0.72899999999999998,"15min":0.72899999999999998},"last":0.66400000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.0149999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45200000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49099999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.503}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71599999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.752}]}]},{"osd":0,"up_from":8,"seq":34359738384,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6704,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960720,"statfs":{"total":21470642176,"available":21463777280,"internally_reserved":0,"allocated":827392,"data_stored":572329,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Tue Mar 10 06:58:21 2026","interfaces":[{"interface":"back","average":{"1min":0.58499999999999996,"5min":0.58499999999999996,"15min":0.58499999999999996},"min":{"1min":0.28399999999999997,"5min":0.28399999999999997,"15min":0.28399999999999997},"max":{"1min":0.97699999999999998,"5min":0.97699999999999998,"15min":0.97699999999999998},"last":0.32700000000000001},{"interface":"front","average":{"1min":0.60499999999999998,"5min":0.60499999999999998,"15min":0.60499999999999998},"min":{"1min":0.23599999999999999,"5min":0.23599999999999999,"15min":0.23599999999999999},"max":{"1min":1.073,"5min":1.073,"15min":1.073},"last":0.92500000000000004}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.996}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.90000000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79500000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.97399999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.86899999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78100000000000003}]}]},{"osd":2,"up_from":14,"seq":60129542157,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6248,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961176,"statfs":{"total":21470642176,"available":21464244224,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1020000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48799999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.503}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60199999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1859999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.1140000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.085}]}]},{"osd":3,"up_from":21,"seq":90194313227,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5736,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961688,"statfs":{"total":21470642176,"available":21464768512,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.316}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53800000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.59399999999999997}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.30499999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46800000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.377}]}]},{"osd":4,"up_from":24,"seq":103079215113,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5800,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961624,"statfs":{"total":21470642176,"available":21464702976,"internally_reserved":0,"allocated":425984,"data_stored":174489,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54900000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71799999999999997}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.36499999999999999}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52500000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.378}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54100000000000004}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.5}]}]},{"osd":5,"up_from":29,"seq":124554051592,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":174224,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72399999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.747}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.90300000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77900000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63100000000000001}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56200000000000006}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72999999999999998}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T06:58:31.336 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-10T06:58:31.336 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-03-10T06:58:31.336 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T06:58:31.336 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph health --format=json 2026-03-10T06:58:31.486 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/mon.a/config 2026-03-10T06:58:31.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:31 vm05 ceph-mon[48591]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:31.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:31 vm05 ceph-mon[48591]: from='client.24416 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T06:58:31.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:31 vm02 ceph-mon[54377]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:31.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:31 vm02 ceph-mon[54377]: from='client.24416 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T06:58:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:31 vm02 ceph-mon[50158]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:31 vm02 ceph-mon[50158]: from='client.24416 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T06:58:31.861 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T06:58:31.861 INFO:teuthology.orchestra.run.vm02.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-10T06:58:31.926 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-10T06:58:31.927 INFO:tasks.cephadm:Setup complete, yielding 2026-03-10T06:58:31.927 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T06:58:31.929 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm02.local 2026-03-10T06:58:31.929 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- bash -c 'ceph config set mgr mgr/cephadm/use_repo_digest false --force' 2026-03-10T06:58:32.104 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:31 vm05 systemd[1]: Starting Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
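The cephadm.shell task that just ran disables mgr/cephadm/use_repo_digest, the option that, when enabled, has cephadm convert image tags to repo digests so every daemon runs the exact same image. A small sketch for applying and then double-checking that setting through the same cephadm shell wrapper; the cephadm_shell helper is introduced here only for readability, and the ceph config get call is an assumed way to verify, not something this job runs:

    fsid=28bd35e6-1c4e-11f1-9057-21b3549603fc
    cephadm_shell() {
        sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
            -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid "$fsid" -- "$@"
    }
    cephadm_shell ceph config set mgr mgr/cephadm/use_repo_digest false --force
    cephadm_shell ceph config get mgr mgr/cephadm/use_repo_digest    # should print: false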
2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 podman[63401]: 2026-03-10 06:58:32.10399026 +0000 UTC m=+0.021306539 container create cd9aa5691228f6f07e7b0ea4f324cd770f3fe97915f24269ab87f4d2de4406e2 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 podman[63401]: 2026-03-10 06:58:32.13641104 +0000 UTC m=+0.053727329 container init cd9aa5691228f6f07e7b0ea4f324cd770f3fe97915f24269ab87f4d2de4406e2 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 podman[63401]: 2026-03-10 06:58:32.139903777 +0000 UTC m=+0.057220056 container start cd9aa5691228f6f07e7b0ea4f324cd770f3fe97915f24269ab87f4d2de4406e2 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 bash[63401]: cd9aa5691228f6f07e7b0ea4f324cd770f3fe97915f24269ab87f4d2de4406e2 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 podman[63401]: 2026-03-10 06:58:32.095279947 +0000 UTC m=+0.012596227 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 systemd[1]: Started Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.170Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.170Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.170Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.170Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm05 (none))" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.170Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.170Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T06:58:32.505 
INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.171Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.172Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.174Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." http2=false 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.174Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.174Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.072µs 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.174Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.174Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.174Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=20.108µs wal_replay_duration=80.451µs total_replay_duration=110.467µs 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.175Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.175Z caller=main.go:947 level=info msg="TSDB started" 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.175Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.185Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=9.830859ms db_storage=381ns remote_storage=751ns web_handler=151ns query_engine=370ns scrape=552.074µs scrape_sd=15.549µs notify=432ns notify_sd=922ns rules=9.062422ms 2026-03-10T06:58:32.505 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:32 
vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:32.185Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-10T06:58:32.519 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T06:58:32.521 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm02.local 2026-03-10T06:58:32.521 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin realm create --rgw-realm=r --default' 2026-03-10T06:58:32.713 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[50158]: from='client.24430 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T06:58:32.713 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3477396446' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T06:58:32.713 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:32.713 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' 2026-03-10T06:58:32.713 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[54377]: from='client.24430 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T06:58:32.714 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3477396446' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T06:58:32.714 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:32.714 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:32 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' 2026-03-10T06:58:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:32 vm05 ceph-mon[48591]: from='client.24430 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T06:58:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:32 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3477396446' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T06:58:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:32 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:33.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:32 vm05 ceph-mon[48591]: from='client.? 
' entity='client.admin' 2026-03-10T06:58:34.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:33 vm05 ceph-mon[48591]: Deploying daemon alertmanager.a on vm02 2026-03-10T06:58:34.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:33 vm05 ceph-mon[48591]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:34.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:33 vm02 ceph-mon[54377]: Deploying daemon alertmanager.a on vm02 2026-03-10T06:58:34.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:33 vm02 ceph-mon[54377]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:34.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:33 vm02 ceph-mon[50158]: Deploying daemon alertmanager.a on vm02 2026-03-10T06:58:34.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:33 vm02 ceph-mon[50158]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:34.724 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T06:58:34.724 INFO:teuthology.orchestra.run.vm02.stdout: "id": "185ed7af-6a6c-493b-b935-45e8f1d8f004", 2026-03-10T06:58:34.724 INFO:teuthology.orchestra.run.vm02.stdout: "name": "r", 2026-03-10T06:58:34.724 INFO:teuthology.orchestra.run.vm02.stdout: "current_period": "66fae4a1-38f2-4655-b0b4-d0e3d58bc665", 2026-03-10T06:58:34.724 INFO:teuthology.orchestra.run.vm02.stdout: "epoch": 1 2026-03-10T06:58:34.724 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T06:58:34.788 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zonegroup create --rgw-zonegroup=default --master --default' 2026-03-10T06:58:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:34 vm05 ceph-mon[48591]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T06:58:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:34 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2760218287' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T06:58:35.008 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:34 vm02 ceph-mon[50158]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T06:58:35.008 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:34 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2760218287' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T06:58:35.008 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:34 vm02 ceph-mon[54377]: osdmap e44: 8 total, 8 up, 8 in 2026-03-10T06:58:35.008 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:34 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/2760218287' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "id": "698943e1-6efd-4aa1-ac06-c2ced18f8859", 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "name": "default", 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "api_name": "default", 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "is_master": "true", 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "endpoints": [], 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "hostnames": [], 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "hostnames_s3website": [], 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "master_zone": "", 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "zones": [], 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "placement_targets": [], 2026-03-10T06:58:35.533 INFO:teuthology.orchestra.run.vm02.stdout: "default_placement": "", 2026-03-10T06:58:35.534 INFO:teuthology.orchestra.run.vm02.stdout: "realm_id": "185ed7af-6a6c-493b-b935-45e8f1d8f004", 2026-03-10T06:58:35.534 INFO:teuthology.orchestra.run.vm02.stdout: "sync_policy": { 2026-03-10T06:58:35.534 INFO:teuthology.orchestra.run.vm02.stdout: "groups": [] 2026-03-10T06:58:35.534 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T06:58:35.534 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T06:58:35.664 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default' 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[54377]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2760218287' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[54377]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[54377]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[50158]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/2760218287' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[50158]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:35.892 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:35 vm02 ceph-mon[50158]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T06:58:35.953 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:35 vm05 ceph-mon[48591]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:58:35.953 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:35 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2760218287' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-10T06:58:35.953 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:35 vm05 ceph-mon[48591]: osdmap e45: 8 total, 8 up, 8 in 2026-03-10T06:58:35.953 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:35 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:35.953 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:35 vm05 ceph-mon[48591]: osdmap e46: 8 total, 8 up, 8 in 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "id": "9e2cd5bd-0321-4e1d-89d0-ed6408b253b2", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "name": "z", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "domain_root": "z.rgw.meta:root", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "control_pool": "z.rgw.control", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "gc_pool": "z.rgw.log:gc", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "lc_pool": "z.rgw.log:lc", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "log_pool": "z.rgw.log", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "intent_log_pool": "z.rgw.log:intent", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "usage_log_pool": "z.rgw.log:usage", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "roles_pool": "z.rgw.meta:roles", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "reshard_pool": "z.rgw.log:reshard", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "user_keys_pool": "z.rgw.meta:users.keys", 2026-03-10T06:58:36.176 INFO:teuthology.orchestra.run.vm02.stdout: "user_email_pool": "z.rgw.meta:users.email", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "user_swift_pool": "z.rgw.meta:users.swift", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "user_uid_pool": "z.rgw.meta:users.uid", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "otp_pool": "z.rgw.otp", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "system_key": { 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "access_key": "", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "secret_key": "" 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: 
"placement_pools": [ 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: { 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "key": "default-placement", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "val": { 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "index_pool": "z.rgw.buckets.index", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "storage_classes": { 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "STANDARD": { 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "data_pool": "z.rgw.buckets.data" 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "data_extra_pool": "z.rgw.buckets.non-ec", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "index_type": 0 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: ], 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "realm_id": "185ed7af-6a6c-493b-b935-45e8f1d8f004", 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout: "notif_pool": "z.rgw.log:notif" 2026-03-10T06:58:36.177 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T06:58:36.223 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin period update --rgw-realm=r --commit' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[54377]: Deploying daemon grafana.a on vm05 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:36 vm02 ceph-mon[50158]: Deploying daemon grafana.a on vm05 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:37.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:36 vm05 ceph-mon[48591]: Deploying daemon grafana.a on vm05 2026-03-10T06:58:38.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:58:37] "GET /metrics HTTP/1.1" 200 192200 "" "Prometheus/2.33.4" 2026-03-10T06:58:38.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[72075]: level=info ts=2026-03-10T06:58:37.815Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000375682s 2026-03-10T06:58:38.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:38 vm05 ceph-mon[48591]: pgmap v14: 33 pgs: 28 unknown, 5 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 341 B/s wr, 0 op/s 2026-03-10T06:58:38.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:38 vm05 ceph-mon[48591]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T06:58:38.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:38 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/674923237' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T06:58:38.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:38 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[54377]: pgmap v14: 33 pgs: 28 unknown, 5 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 341 B/s wr, 0 op/s 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[54377]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/674923237' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[50158]: pgmap v14: 33 pgs: 28 unknown, 5 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail; 341 B/s wr, 0 op/s 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[50158]: osdmap e47: 8 total, 8 up, 8 in 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/674923237' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T06:58:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:38 vm02 ceph-mon[50158]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-10T06:58:40.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:39 vm05 ceph-mon[48591]: pgmap v16: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.5 KiB/s wr, 6 op/s 2026-03-10T06:58:40.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:39 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-10T06:58:40.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:39 vm05 ceph-mon[48591]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T06:58:40.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:39 vm02 ceph-mon[54377]: pgmap v16: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.5 KiB/s wr, 6 op/s 2026-03-10T06:58:40.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:39 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-10T06:58:40.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:39 vm02 ceph-mon[54377]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T06:58:40.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:39 vm02 ceph-mon[50158]: pgmap v16: 65 pgs: 32 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.5 KiB/s wr, 6 op/s 2026-03-10T06:58:40.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:39 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-10T06:58:40.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:39 vm02 ceph-mon[50158]: osdmap e48: 8 total, 8 up, 8 in 2026-03-10T06:58:41.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:40 vm05 ceph-mon[48591]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T06:58:41.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:40 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/674923237' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T06:58:41.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:40 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T06:58:41.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:40 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[54377]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/674923237' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[54377]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[50158]: osdmap e49: 8 total, 8 up, 8 in 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/674923237' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-10T06:58:41.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:40 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:41 vm05 ceph-mon[48591]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.5 KiB/s wr, 6 op/s 2026-03-10T06:58:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:41 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-10T06:58:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:41 vm05 ceph-mon[48591]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T06:58:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:41 vm05 ceph-mon[48591]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T06:58:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:41 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[54377]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.5 KiB/s wr, 6 op/s 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[54377]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[54377]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[50158]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 451 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 3.5 KiB/s wr, 6 op/s 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[50158]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[50158]: osdmap e50: 8 total, 8 up, 8 in 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[50158]: osdmap e51: 8 total, 8 up, 8 in 2026-03-10T06:58:42.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:41 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:44 vm05 ceph-mon[48591]: pgmap v22: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 511 B/s wr, 7 op/s 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:44 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:44 vm05 ceph-mon[48591]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:44 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[54377]: pgmap v22: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 511 B/s wr, 7 op/s 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[54377]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[50158]: pgmap v22: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 511 B/s wr, 7 op/s 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[50158]: osdmap e52: 8 total, 8 up, 8 in 2026-03-10T06:58:45.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:44 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T06:58:45.465 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 systemd[1]: Starting Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T06:58:45.745 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 podman[63682]: 2026-03-10 06:58:45.464730627 +0000 UTC m=+0.018723404 container create 81ae9caed32eb596b37a84c79ce414d6d3f8e2f9c7a953e9f5c31e985596779b (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, maintainer=Paul Cuzner , io.openshift.expose-services=, distribution-scope=public, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=Ceph Grafana Container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=Red Hat Universal Base Image 8, vendor=Red Hat, Inc., name=ubi8, com.redhat.component=ubi8-container, io.openshift.tags=base rhel8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, summary=Grafana Container configured for Ceph mgr/dashboard integration, version=8.5, build-date=2022-03-28T10:36:18.413762, release=236.1648460182, io.buildah.version=1.24.2) 2026-03-10T06:58:45.745 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 podman[63682]: 2026-03-10 06:58:45.502053763 +0000 UTC m=+0.056046560 container init 81ae9caed32eb596b37a84c79ce414d6d3f8e2f9c7a953e9f5c31e985596779b (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base rhel8, io.k8s.display-name=Red Hat Universal Base Image 8, vendor=Red Hat, Inc., summary=Grafana Container configured for Ceph mgr/dashboard integration, name=ubi8, vcs-type=git, io.openshift.expose-services=, com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2022-03-28T10:36:18.413762, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., description=Ceph Grafana Container, architecture=x86_64, version=8.5, release=236.1648460182, io.buildah.version=1.24.2) 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 podman[63682]: 2026-03-10 06:58:45.506314067 +0000 UTC m=+0.060306853 container start 81ae9caed32eb596b37a84c79ce414d6d3f8e2f9c7a953e9f5c31e985596779b (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.component=ubi8-container, version=8.5, release=236.1648460182, io.buildah.version=1.24.2, io.openshift.tags=base rhel8, vendor=Red Hat, Inc., distribution-scope=public, description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, maintainer=Paul Cuzner , io.k8s.display-name=Red Hat Universal Base Image 8, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.openshift.expose-services=, architecture=x86_64, name=ubi8, build-date=2022-03-28T10:36:18.413762) 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 bash[63682]: 81ae9caed32eb596b37a84c79ce414d6d3f8e2f9c7a953e9f5c31e985596779b 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 podman[63682]: 2026-03-10 06:58:45.457692735 +0000 UTC m=+0.011685521 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 systemd[1]: Started Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
logger=settings 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="App mode production" logger=settings 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: 
t=2026-03-10T06:58:45+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create migration_log table" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user table" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.login" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.email" 2026-03-10T06:58:45.746 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_login - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_email - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table user to user_v1 - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user table v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_login - v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_email - v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table user_v1" 2026-03-10T06:58:45.747 
INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column help_flags1 to user table" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update user table charset" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add last_seen_at column to user" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add missing user data" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_disabled column to user" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index user.login/user.email" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_service_account column to user" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create temp user table v1-7" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v1-7" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v1-7" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v1-7" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v1-7" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update temp_user table charset" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_email - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_org_id - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_code - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_status - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table temp_user to temp_user_tmp_qwerty - v1" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create temp_user v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy temp_user v1 to v2" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop temp_user_tmp_qwerty" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Set created for temp users that will otherwise prematurely expire" 2026-03-10T06:58:45.747 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: 
t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create star table" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index star.user_id_dashboard_id" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create org table v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_name - v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create org_user table v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_org_id - v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_user_org_id_user_id - v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_user_id - v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update org table charset" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update org_user table charset" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate all Read Only Viewers to Viewers" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard table" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard.account_id" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_account_id_slug" 
2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_tag table" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_tag.dasboard_id_term" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_dashboard_tag_dashboard_id_term - v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" 2026-03-10T06:58:45.748 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" 2026-03-10T06:58:45.749 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 
06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column has_acl in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index dashboard_org_id_uid" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_slug" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 
lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning v1 to v2" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop dashboard_provisioning_tmp_qwerty" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted dashboards" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 
lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" 2026-03-10T06:58:45.750 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json 
data column" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update initial version to 1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index datasource_org_id_uid" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index datasource_org_id_is_default" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.key" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: 
t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" 2026-03-10T06:58:45.751 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" 
2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" 2026-03-10T06:58:45.752 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" 
2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist table v2" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" 2026-03-10T06:58:45.753 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v1" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 
lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" 2026-03-10T06:58:45.754 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" 2026-03-10T06:58:45.818 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: pgmap v24: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 443 B/s wr, 6 op/s 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[54377]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T06:58:45.819 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[72075]: level=info ts=2026-03-10T06:58:45.818Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.002863853s 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: pgmap v24: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 443 B/s wr, 6 op/s 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: osdmap e53: 8 total, 8 up, 8 in 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished
2026-03-10T06:58:45.819 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:45 vm02 ceph-mon[50158]: osdmap e54: 8 total, 8 up, 8 in
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout: "id": "3c3ffc7b-d357-42bd-9a14-08d146a067bd",
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout: "epoch": 1,
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout: "predecessor_uuid": "66fae4a1-38f2-4655-b0b4-d0e3d58bc665",
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout: "sync_status": [],
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout: "period_map": {
2026-03-10T06:58:45.865 INFO:teuthology.orchestra.run.vm02.stdout: "id": "3c3ffc7b-d357-42bd-9a14-08d146a067bd",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "zonegroups": [
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: {
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "id": "698943e1-6efd-4aa1-ac06-c2ced18f8859",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "name": "default",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "api_name": "default",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "is_master": "true",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "endpoints": [],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "hostnames": [],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "hostnames_s3website": [],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "master_zone": "9e2cd5bd-0321-4e1d-89d0-ed6408b253b2",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "zones": [
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: {
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "id": "9e2cd5bd-0321-4e1d-89d0-ed6408b253b2",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "name": "z",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "endpoints": [],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "log_meta": "false",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "log_data": "false",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "bucket_index_max_shards": 11,
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "read_only": "false",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "tier_type": "",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "sync_from_all": "true",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "sync_from": [],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "redirect_zone": ""
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: ],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "placement_targets": [
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: {
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "name": "default-placement",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "tags": [],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "storage_classes": [
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "STANDARD"
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: ]
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: ],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "default_placement": "default-placement",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "realm_id": "185ed7af-6a6c-493b-b935-45e8f1d8f004",
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "sync_policy": {
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "groups": []
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: ],
2026-03-10T06:58:45.866 INFO:teuthology.orchestra.run.vm02.stdout: "short_zone_ids": [
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "key": "9e2cd5bd-0321-4e1d-89d0-ed6408b253b2",
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "val": 2401775289
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: ]
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "master_zonegroup": "698943e1-6efd-4aa1-ac06-c2ced18f8859",
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "master_zone": "9e2cd5bd-0321-4e1d-89d0-ed6408b253b2",
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "period_config": {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "bucket_quota": {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "enabled": false,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "check_on_raw": false,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_size": -1,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_size_kb": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_objects": -1
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "user_quota": {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "enabled": false,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "check_on_raw": false,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_size": -1,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_size_kb": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_objects": -1
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "user_ratelimit": {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_read_ops": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_write_ops": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_read_bytes": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_write_bytes": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "enabled": false
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "bucket_ratelimit": {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_read_ops": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_write_ops": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_read_bytes": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_write_bytes": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "enabled": false
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "anonymous_ratelimit": {
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_read_ops": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_write_ops": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_read_bytes": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "max_write_bytes": 0,
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "enabled": false
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "realm_id": "185ed7af-6a6c-493b-b935-45e8f1d8f004",
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "realm_name": "r",
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout: "realm_epoch": 2
2026-03-10T06:58:45.867 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T06:58:45.986 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000'
2026-03-10T06:58:46.006 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: pgmap v24: 129 pgs: 32 unknown, 97 active+clean; 451 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 443 B/s wr, 6 op/s
2026-03-10T06:58:46.006 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-03-10T06:58:46.006 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: osdmap e53: 8 total, 8 up, 8 in
2026-03-10T06:58:46.006 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: from='client.?
192.168.123.102:0/1530393383' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-10T06:58:46.006 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:46.006 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:46.007 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:46.007 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1530393383' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-10T06:58:46.007 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:45 vm05 ceph-mon[48591]: osdmap e54: 8 total, 8 up, 8 in 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index notification_journal org_id & alert_id & notifier_id" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create 
alert_notification_state table v1" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_notification_state_alert_id" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old annotation table v4" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 
v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column region_id to annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column tags to annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v2" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 
vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Make epoch_end the same as epoch" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Move region to single row" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" 2026-03-10T06:58:46.008 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Set dashboard version to 1 where 0" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter 
dashboard_version.data to mediumtext v1" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external to team_member table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" 2026-03-10T06:58:46.009 
INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="save default acl rules in dashboard_acl table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_login_attempt_username - v1" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info 
msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" 2026-03-10T06:58:46.009 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user auth token" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on 
org_id and uid columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" 2026-03-10T06:58:46.010 
INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on alert_instance" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 
lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:45 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:45+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title columns" 2026-03-10T06:58:46.010 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:58:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in alert_configuration" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing 
migration" logger=migrator id=create_ngalert_configuration_table 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index library_element org_id_uid" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" 2026-03-10T06:58:46.397 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info 
msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create team role table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_role_org_id_team_id_role_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 
10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.name" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.org_id" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Executing 
migration" logger=migrator id="add unique index builtin_role_role_name" 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=539.062651ms 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=warn msg="[Deprecated] the datasource provisioning config is outdated. please upgrade" logger=provisioning.datasources filename=/etc/grafana/provisioning/datasources/ceph-dashboard.yml 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-10T06:58:46.398 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 06:58:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T06:58:46+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T06:58:46.587 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled rgw.foo update... 
2026-03-10T06:58:46.666 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw smpl' 2026-03-10T06:58:47.270 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled rgw.smpl update... 2026-03-10T06:58:47.443 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph osd pool create foo' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: pgmap v27: 129 pgs: 23 unknown, 106 active+clean; 452 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 443 B/s rd, 443 B/s wr, 1 op/s 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='client.24499 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: Saving service rgw.foo spec with placement count:2 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: Saving service rgw.foo spec with placement count:2 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 
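(Aside: the two teuthology.orchestra.run commands above, like the other remote ceph invocations captured in this log, go through the same cephadm shell wrapper; only the trailing bash -c payload changes. The general shape, with angle-bracket placeholders standing in for the run-specific values, is:

    sudo /home/ubuntu/cephtest/cephadm --image <container-image> shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid <cluster-fsid> -e sha1=<build-sha1> \
        -- bash -c '<ceph command>'

The -e sha1=... flag exports that value into the container environment, so the wrapped command can reference $sha1 if it needs to.)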
2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:47.770 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: Deploying daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: pgmap v27: 129 pgs: 23 unknown, 106 active+clean; 452 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 443 B/s rd, 443 B/s wr, 1 op/s 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='client.24499 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: Saving service rgw.foo spec with placement count:2 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: Saving service rgw.foo spec with placement count:2 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: Deploying daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T06:58:47.771 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:47 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: pgmap v27: 129 pgs: 23 unknown, 106 active+clean; 452 KiB data, 51 MiB used, 160 GiB / 160 GiB avail; 443 B/s rd, 443 B/s wr, 1 op/s 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='client.24499 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: Saving service rgw.foo spec with placement count:2 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: Saving service rgw.foo spec with placement count:2 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:47.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 ceph-mon[48591]: Deploying daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T06:58:47.976 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:47 vm05 
ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:48.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:58:47] "GET /metrics HTTP/1.1" 200 192200 "" "Prometheus/2.33.4" 2026-03-10T06:58:48.962 INFO:teuthology.orchestra.run.vm02.stderr:pool 'foo' created 2026-03-10T06:58:49.015 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'rbd pool init foo' 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='client.24502 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: Saving service rgw.smpl spec with placement count:2 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: Deploying daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T06:58:49.220 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1588190290' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[50158]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='client.24502 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: Saving service rgw.smpl spec with placement count:2 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: Deploying daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1588190290' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T06:58:49.221 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:48 vm02 ceph-mon[54377]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='client.24502 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: Saving service rgw.smpl spec with placement count:2 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: Deploying daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1588190290' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T06:58:49.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:48 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: pgmap v28: 129 pgs: 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1.2 KiB/s wr, 2 op/s 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:49.984 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:49 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/2023641863' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: pgmap v28: 129 pgs: 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1.2 KiB/s wr, 2 op/s 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2023641863' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: pgmap v28: 129 pgs: 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1.2 KiB/s wr, 2 op/s 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: osdmap e55: 8 total, 8 up, 8 in 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:50.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:49 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2023641863' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/2023641863' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[50158]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2023641863' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[54377]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.278 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:51 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:51 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2023641863' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-10T06:58:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:51 vm05 ceph-mon[48591]: osdmap e56: 8 total, 8 up, 8 in 2026-03-10T06:58:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:51 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:51 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:51 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.117 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply iscsi foo u p' 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: pgmap v31: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1.3 KiB/s wr, 3 op/s 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: Checking dashboard <-> RGW credentials 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.335 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:52.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: pgmap v31: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1.3 KiB/s wr, 3 op/s 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: Checking dashboard <-> RGW credentials 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:52.336 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.336 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:52 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: pgmap v31: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1.3 KiB/s wr, 3 op/s 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: Checking dashboard <-> RGW credentials 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: osdmap e57: 8 total, 8 up, 8 in 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:52.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:52 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:52.769 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled iscsi.foo update... 
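The pool-related audit entries above (pool 'foo' created, then a client dispatching "osd pool application enable" with app "rbd") record what the test's `rbd pool init foo` step actually does against the monitors: it tags the freshly created pool with the rbd application. A minimal reproduction of that sequence, with the pool name taken from this log:

    # Create the pool and initialize it for RBD use.
    ceph osd pool create foo
    # As seen in the mon audit log, this dispatches:
    #   osd pool application enable foo rbd
    rbd pool init foo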
2026-03-10T06:58:52.864 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 120' 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: Saving service rgw.smpl spec with placement count:2 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: Deploying daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: Saving service rgw.smpl spec with placement count:2 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: Deploying daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 
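The DEBUG:teuthology.orchestra.run lines above all follow the same invocation pattern: each cluster command is wrapped in `cephadm shell`, which starts a short-lived container from quay.io/ceph/ceph:v17.2.0 with the cluster's conf, keyring, and fsid passed in. A minimal sketch of that pattern, using the paths, image, and fsid recorded in this log (the helper name run_in_shell is illustrative, not a cephadm command; the test also passes -e sha1=<build sha>, which only matters for commands that reference $sha1):

    #!/usr/bin/env bash
    # Values below are the ones recorded in this run.
    FSID=28bd35e6-1c4e-11f1-9057-21b3549603fc
    IMAGE=quay.io/ceph/ceph:v17.2.0

    # Illustrative wrapper around `cephadm shell ... -- bash -c '<cmd>'`.
    run_in_shell() {
      sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
        -c /etc/ceph/ceph.conf \
        -k /etc/ceph/ceph.client.admin.keyring \
        --fsid "$FSID" \
        -- bash -c "$1"
    }

    run_in_shell 'ceph orch apply iscsi foo u p'
    run_in_shell 'sleep 120'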
2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:53.051 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:53 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: Saving service rgw.smpl spec with placement count:2 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: Deploying daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: osdmap e58: 8 total, 8 up, 8 in 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-10T06:58:53.234 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:53.235 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:53.235 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:53 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: Deploying daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 288 KiB/s rd, 9.2 KiB/s wr, 542 op/s 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: from='client.14727 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: Saving service iscsi.foo spec with placement count:1 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:54.056 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth 
get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: Deploying daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 288 KiB/s rd, 9.2 KiB/s wr, 542 op/s 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: from='client.14727 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: Saving service iscsi.foo spec with placement count:1 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:54.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:54 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: Deploying daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 288 KiB/s rd, 9.2 KiB/s wr, 542 op/s 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: from='client.14727 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: Saving service iscsi.foo spec with placement count:1 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:54.361 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:54 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 218 KiB/s rd, 7.0 KiB/s wr, 409 op/s 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: Checking dashboard <-> RGW 
credentials 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 218 KiB/s rd, 7.0 KiB/s wr, 409 op/s 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: Checking dashboard <-> RGW credentials 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:55.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:55 vm02 ceph-mon[54377]: from='mgr.14415 
192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 218 KiB/s rd, 7.0 KiB/s wr, 409 op/s 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: Checking dashboard <-> RGW credentials 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.576 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:58:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:58:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:58:56.696 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[54377]: Checking pool "foo" exists for service iscsi.foo 2026-03-10T06:58:56.696 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[54377]: Deploying daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T06:58:56.696 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.696 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:56.696 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:56.696 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[54377]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-10T06:58:56.697 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:56 
vm02 ceph-mon[50158]: Checking pool "foo" exists for service iscsi.foo 2026-03-10T06:58:56.697 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[50158]: Deploying daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T06:58:56.697 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.697 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:56.697 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:56.697 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:56 vm02 ceph-mon[50158]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-10T06:58:56.990 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:56 vm05 ceph-mon[48591]: Checking pool "foo" exists for service iscsi.foo 2026-03-10T06:58:56.990 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:56 vm05 ceph-mon[48591]: Deploying daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T06:58:56.990 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:56 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:56.990 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:56 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:56.990 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:56 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:56.990 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:56 vm05 ceph-mon[48591]: mgrmap e20: y(active, since 36s), standbys: x 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 75 MiB used, 160 GiB / 160 GiB avail; 273 KiB/s rd, 6.2 KiB/s wr, 479 op/s 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1572505208' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/16969816' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]: dispatch 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]: dispatch 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:58:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:58:57] "GET /metrics HTTP/1.1" 200 205884 "" "Prometheus/2.33.4" 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 75 MiB used, 160 GiB / 160 GiB avail; 273 KiB/s rd, 6.2 KiB/s wr, 479 op/s 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1572505208' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/16969816' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]: dispatch 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]: dispatch 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:57.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:57 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 75 MiB used, 160 GiB / 160 GiB avail; 273 KiB/s rd, 6.2 KiB/s wr, 479 op/s 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1572505208' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/16969816' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]: dispatch 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]: dispatch 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:57 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:58.043 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 systemd[1]: Stopping Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T06:58:58.305 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 bash[76858]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager.a" found: no such container 2026-03-10T06:58:58.305 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[72075]: level=info ts=2026-03-10T06:58:58.091Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-10T06:58:58.305 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76866]: 2026-03-10 06:58:58.104533854 +0000 UTC m=+0.030272655 container died 2a74a823ea779625c2a8c26a82aba689c2795e5b6e694521f0d350452a7a0d1b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.305 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76866]: 2026-03-10 06:58:58.121397749 +0000 UTC m=+0.047136550 container remove 2a74a823ea779625c2a8c26a82aba689c2795e5b6e694521f0d350452a7a0d1b (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76866]: 2026-03-10 06:58:58.122479926 +0000 UTC m=+0.048218727 volume remove 1065237f66b2476c76ef74c22818c982e44656bb4ad7fcd42698521e44751f0f 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 bash[76866]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 bash[76885]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager.a" found: no such container 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@alertmanager.a.service: Deactivated successfully. 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 systemd[1]: Stopped Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 systemd[1]: Starting Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76924]: 2026-03-10 06:58:58.276573746 +0000 UTC m=+0.032886989 volume create 187dad9c09a9a22c5de6d79f934fdcc1defa9dbaac404cae84527bf63abb6672 2026-03-10T06:58:58.306 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76924]: 2026-03-10 06:58:58.280290373 +0000 UTC m=+0.036603616 container create 519616b4f7a0841745aeb5c02799efc9406a4e4cf1a8e913488325b523e9cf74 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76924]: 2026-03-10 06:58:58.310883666 +0000 UTC m=+0.067196919 container init 519616b4f7a0841745aeb5c02799efc9406a4e4cf1a8e913488325b523e9cf74 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76924]: 2026-03-10 06:58:58.314206176 +0000 UTC m=+0.070519419 container start 519616b4f7a0841745aeb5c02799efc9406a4e4cf1a8e913488325b523e9cf74 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 bash[76924]: 519616b4f7a0841745aeb5c02799efc9406a4e4cf1a8e913488325b523e9cf74 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 podman[76924]: 2026-03-10 06:58:58.254178096 +0000 UTC m=+0.010491349 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 systemd[1]: Started Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.339Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)" 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.339Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)" 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.340Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.102 port=9094 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.341Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.366Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.366Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.367Z caller=main.go:518 msg=Listening address=:9093 2026-03-10T06:58:58.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:58:58.367Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-10T06:58:58.893 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T06:58:58.893 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: Reconfiguring daemon alertmanager.a on vm02 2026-03-10T06:58:58.894 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]': finished 2026-03-10T06:58:58.894 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T06:58:58.894 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/4193253236' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]: dispatch 2026-03-10T06:58:58.894 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]: dispatch 2026-03-10T06:58:58.894 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:58 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 systemd[1]: Stopping Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 bash[66910]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus.a" found: no such container 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.836Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.837Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 
2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.837Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.837Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.837Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.837Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.837Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.838Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.838Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.839Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.839Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[63412]: ts=2026-03-10T06:58:58.839Z caller=main.go:1066 level=info msg="See you next time!" 
2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 podman[66917]: 2026-03-10 06:58:58.847253814 +0000 UTC m=+0.027141981 container died cd9aa5691228f6f07e7b0ea4f324cd770f3fe97915f24269ab87f4d2de4406e2 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 podman[66917]: 2026-03-10 06:58:58.86552959 +0000 UTC m=+0.045417757 container remove cd9aa5691228f6f07e7b0ea4f324cd770f3fe97915f24269ab87f4d2de4406e2 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 bash[66917]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 bash[66936]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus.a" found: no such container 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a.service: Deactivated successfully. 2026-03-10T06:58:58.894 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 systemd[1]: Stopped Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T06:58:59.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T06:58:59.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: Reconfiguring daemon alertmanager.a on vm02 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]': finished 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/4193253236' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]: dispatch 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]: dispatch 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: Reconfiguring daemon alertmanager.a on vm02 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/301535323"}]': finished 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: osdmap e59: 8 total, 8 up, 8 in 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/4193253236' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]: dispatch 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]: dispatch 2026-03-10T06:58:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:58 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:58 vm05 systemd[1]: Starting Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 podman[66978]: 2026-03-10 06:58:59.018647412 +0000 UTC m=+0.021467681 container create 354a9ecb0815a137ed2d00f9321e921ea5632003e6d2d9ad2c230c60a8429c7a (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 podman[66978]: 2026-03-10 06:58:59.051008841 +0000 UTC m=+0.053829110 container init 354a9ecb0815a137ed2d00f9321e921ea5632003e6d2d9ad2c230c60a8429c7a (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 podman[66978]: 2026-03-10 06:58:59.053939134 +0000 UTC m=+0.056759393 container start 354a9ecb0815a137ed2d00f9321e921ea5632003e6d2d9ad2c230c60a8429c7a (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 bash[66978]: 354a9ecb0815a137ed2d00f9321e921ea5632003e6d2d9ad2c230c60a8429c7a 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 podman[66978]: 2026-03-10 06:58:59.009807686 +0000 UTC m=+0.012627965 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 systemd[1]: Started Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
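The alertmanager.a and prometheus.a activity above is a cephadm reconfigure ("Reconfiguring ... (dependencies changed)"): the old container is removed, the ceph-<fsid>@<daemon>.service systemd unit is cycled, and a fresh container is started from the same image. A few commands that can confirm the monitoring daemons came back after such a reconfigure, using the daemon names and fsid from this log (run the orch commands from any host with the admin keyring, e.g. inside cephadm shell, and the systemd commands on the host that carries the daemon):

    # Orchestrator view of the monitoring daemons.
    ceph orch ps --daemon-type prometheus
    ceph orch ps --daemon-type alertmanager

    # cephadm-managed systemd units on the daemon's host.
    sudo systemctl status ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a.service
    sudo journalctl -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@alertmanager.a.service --since "10 minutes ago"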
2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.098Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.098Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.098Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.098Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm05 (none))" 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.099Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.099Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.102Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.103Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.103Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." 
http2=false 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.105Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.105Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.082µs 2026-03-10T06:58:59.195 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:58:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:58:59.105Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T06:58:59.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 345 KiB/s rd, 5.5 KiB/s wr, 585 op/s 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: Reconfiguring daemon prometheus.a on vm05 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]': finished 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2022859286' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/301535323"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.102:9093"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.102:9093"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.105:9095"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.105:9095"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.105:3000"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.105:3000"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2022859286' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/301535323"}]': finished 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/757914830' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]: dispatch 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 345 KiB/s rd, 5.5 KiB/s wr, 585 op/s 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: Reconfiguring daemon prometheus.a on vm05 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]': finished 2026-03-10T06:58:59.837 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/2022859286' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/301535323"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.102:9093"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.102:9093"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.105:9095"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.105:9095"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.105:3000"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.105:3000"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2022859286' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/301535323"}]': finished 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/757914830' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]: dispatch 2026-03-10T06:58:59.838 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:58:59 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 345 KiB/s rd, 5.5 KiB/s wr, 585 op/s 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: Reconfiguring daemon prometheus.a on vm05 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2239439801"}]': finished 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: osdmap e60: 8 total, 8 up, 8 in 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2022859286' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/301535323"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.102:9093"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.102:9093"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.105:9095"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.105:9095"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.105:3000"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.105:3000"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2022859286' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/301535323"}]': finished 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: osdmap e61: 8 total, 8 up, 8 in 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/757914830' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]: dispatch 2026-03-10T06:59:00.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:58:59 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]: dispatch 2026-03-10T06:59:00.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:59:00 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:59:00.341Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.00069588s 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.565Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=1 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.566Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=1 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.566Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=111.779µs wal_replay_duration=1.460890019s total_replay_duration=1.461011726s 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.568Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.568Z caller=main.go:947 level=info msg="TSDB started" 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.568Z 
caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.581Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=13.59731ms db_storage=782ns remote_storage=1.403µs web_handler=441ns query_engine=532ns scrape=431.088µs scrape_sd=19.396µs notify=39.644µs notify_sd=6.913µs rules=12.854318ms 2026-03-10T06:59:01.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 06:59:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T06:59:00.581Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-10T06:59:01.319 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]': finished 2026-03-10T06:59:01.319 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: osdmap e62: 8 total, 8 up, 8 in 2026-03-10T06:59:01.319 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 193 KiB/s rd, 255 B/s wr, 300 op/s 2026-03-10T06:59:01.319 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.319 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.319 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/86359110' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]: dispatch 2026-03-10T06:59:01.320 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]: dispatch 2026-03-10T06:59:01.320 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.320 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: Checking dashboard <-> RGW credentials 2026-03-10T06:59:01.320 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.320 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:59:01.320 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:01 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]': finished 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: osdmap e62: 8 total, 8 up, 8 in 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 193 KiB/s rd, 255 B/s wr, 300 op/s 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/86359110' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]: dispatch 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]: dispatch 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: Checking dashboard <-> RGW credentials 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2777838814"}]': finished 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: osdmap e62: 8 total, 8 up, 8 in 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 92 MiB used, 160 GiB / 160 GiB avail; 193 KiB/s rd, 255 B/s wr, 300 op/s 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/86359110' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]: dispatch 2026-03-10T06:59:01.321 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]: dispatch 2026-03-10T06:59:01.322 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.322 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: Checking dashboard <-> RGW credentials 2026-03-10T06:59:01.322 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:01.322 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T06:59:01.322 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:01 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T06:59:02.444 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]': finished 2026-03-10T06:59:02.444 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: osdmap e63: 8 total, 8 up, 8 in 2026-03-10T06:59:02.444 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1344347875' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]: dispatch 2026-03-10T06:59:02.444 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]: dispatch 2026-03-10T06:59:02.444 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:02 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]': finished 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: osdmap e63: 8 total, 8 up, 8 in 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/1344347875' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]: dispatch 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]: dispatch 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1263130046"}]': finished 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: osdmap e63: 8 total, 8 up, 8 in 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1344347875' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]: dispatch 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]: dispatch 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:02 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T06:59:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:03 vm05 ceph-mon[48591]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 255 B/s wr, 11 op/s 2026-03-10T06:59:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:03 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]': finished 2026-03-10T06:59:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:03 vm05 ceph-mon[48591]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T06:59:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:03 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1730668548' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]: dispatch 2026-03-10T06:59:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:03 vm05 ceph-mon[48591]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]: dispatch 2026-03-10T06:59:03.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[50158]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 255 B/s wr, 11 op/s 2026-03-10T06:59:03.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]': finished 2026-03-10T06:59:03.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[50158]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1730668548' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]: dispatch 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]: dispatch 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[54377]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 255 B/s wr, 11 op/s 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/4171767778"}]': finished 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[54377]: osdmap e64: 8 total, 8 up, 8 in 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1730668548' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]: dispatch 2026-03-10T06:59:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:03 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]: dispatch 2026-03-10T06:59:05.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:04 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]': finished 2026-03-10T06:59:05.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:04 vm05 ceph-mon[48591]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T06:59:05.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:04 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/755078748' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/1096882102"}]: dispatch 2026-03-10T06:59:05.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:04 vm02 ceph-mon[54377]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]': finished 2026-03-10T06:59:05.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:04 vm02 ceph-mon[54377]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T06:59:05.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:04 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/755078748' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/1096882102"}]: dispatch 2026-03-10T06:59:05.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:04 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/1096882102"}]': finished 2026-03-10T06:59:05.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:04 vm02 ceph-mon[50158]: osdmap e65: 8 total, 8 up, 8 in 2026-03-10T06:59:05.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:04 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/755078748' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/1096882102"}]: dispatch 2026-03-10T06:59:06.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:05 vm05 ceph-mon[48591]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 255 B/s wr, 11 op/s 2026-03-10T06:59:06.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:05 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/755078748' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/1096882102"}]': finished 2026-03-10T06:59:06.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:05 vm05 ceph-mon[48591]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T06:59:06.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:05 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1900968636' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2455034139"}]: dispatch 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[50158]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 255 B/s wr, 11 op/s 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/755078748' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/1096882102"}]': finished 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[50158]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1900968636' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2455034139"}]: dispatch 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[54377]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 255 B/s wr, 11 op/s 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/755078748' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/1096882102"}]': finished 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[54377]: osdmap e66: 8 total, 8 up, 8 in 2026-03-10T06:59:06.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:05 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1900968636' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2455034139"}]: dispatch 2026-03-10T06:59:06.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:59:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:59:07.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:06 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1900968636' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2455034139"}]': finished 2026-03-10T06:59:07.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:06 vm02 ceph-mon[54377]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T06:59:07.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:06 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/544320925' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/3034129265"}]: dispatch 2026-03-10T06:59:07.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:06 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1900968636' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2455034139"}]': finished 2026-03-10T06:59:07.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:06 vm02 ceph-mon[50158]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T06:59:07.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:06 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/544320925' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/3034129265"}]: dispatch 2026-03-10T06:59:07.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:06 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1900968636' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2455034139"}]': finished 2026-03-10T06:59:07.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:06 vm05 ceph-mon[48591]: osdmap e67: 8 total, 8 up, 8 in 2026-03-10T06:59:07.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:06 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/544320925' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/3034129265"}]: dispatch 2026-03-10T06:59:08.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[54377]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/544320925' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/3034129265"}]': finished 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[54377]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2903672123' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]: dispatch 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]: dispatch 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:59:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:07] "GET /metrics HTTP/1.1" 200 214512 "" "Prometheus/2.33.4" 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[50158]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/544320925' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/3034129265"}]': finished 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[50158]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2903672123' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]: dispatch 2026-03-10T06:59:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:07 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]: dispatch 2026-03-10T06:59:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:07 vm05 ceph-mon[48591]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail 2026-03-10T06:59:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:07 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:07 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/544320925' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/3034129265"}]': finished 2026-03-10T06:59:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:07 vm05 ceph-mon[48591]: osdmap e68: 8 total, 8 up, 8 in 2026-03-10T06:59:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:07 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/2903672123' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]: dispatch 2026-03-10T06:59:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:07 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]: dispatch 2026-03-10T06:59:08.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 06:59:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T06:59:08.345Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.004059747s 2026-03-10T06:59:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:08 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]': finished 2026-03-10T06:59:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:08 vm05 ceph-mon[48591]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T06:59:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:08 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2671159210' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]: dispatch 2026-03-10T06:59:09.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:08 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]: dispatch 2026-03-10T06:59:09.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]': finished 2026-03-10T06:59:09.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[54377]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T06:59:09.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2671159210' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]: dispatch 2026-03-10T06:59:09.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]: dispatch 2026-03-10T06:59:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3993166382"}]': finished 2026-03-10T06:59:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[50158]: osdmap e69: 8 total, 8 up, 8 in 2026-03-10T06:59:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2671159210' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]: dispatch 2026-03-10T06:59:09.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:08 vm02 ceph-mon[50158]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]: dispatch 2026-03-10T06:59:10.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:09 vm05 ceph-mon[48591]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:10.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:09 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]': finished 2026-03-10T06:59:10.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:09 vm05 ceph-mon[48591]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T06:59:10.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:09 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3847380712' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]: dispatch 2026-03-10T06:59:10.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:09 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]: dispatch 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[54377]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]': finished 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[54377]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3847380712' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]: dispatch 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]: dispatch 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[50158]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/3034129265"}]': finished 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[50158]: osdmap e70: 8 total, 8 up, 8 in 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3847380712' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]: dispatch 2026-03-10T06:59:10.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:09 vm02 ceph-mon[50158]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]: dispatch 2026-03-10T06:59:11.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:10 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]': finished 2026-03-10T06:59:11.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:10 vm05 ceph-mon[48591]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T06:59:11.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:10 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/732391025' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/370820550"}]: dispatch 2026-03-10T06:59:11.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:10 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]': finished 2026-03-10T06:59:11.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:10 vm02 ceph-mon[54377]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T06:59:11.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:10 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/732391025' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/370820550"}]: dispatch 2026-03-10T06:59:11.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:10 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2512740709"}]': finished 2026-03-10T06:59:11.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:10 vm02 ceph-mon[50158]: osdmap e71: 8 total, 8 up, 8 in 2026-03-10T06:59:11.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:10 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/732391025' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/370820550"}]: dispatch 2026-03-10T06:59:12.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:11 vm05 ceph-mon[48591]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:12.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:11 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/732391025' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/370820550"}]': finished 2026-03-10T06:59:12.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:11 vm05 ceph-mon[48591]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T06:59:12.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:11 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1174511729' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]: dispatch 2026-03-10T06:59:12.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:11 vm05 ceph-mon[48591]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]: dispatch 2026-03-10T06:59:12.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[54377]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:12.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/732391025' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/370820550"}]': finished 2026-03-10T06:59:12.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[54377]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T06:59:12.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1174511729' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]: dispatch 2026-03-10T06:59:12.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]: dispatch 2026-03-10T06:59:12.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[50158]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:12.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/732391025' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/370820550"}]': finished 2026-03-10T06:59:12.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[50158]: osdmap e72: 8 total, 8 up, 8 in 2026-03-10T06:59:12.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1174511729' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]: dispatch 2026-03-10T06:59:12.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:11 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]: dispatch 2026-03-10T06:59:13.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:12 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]': finished 2026-03-10T06:59:13.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:12 vm05 ceph-mon[48591]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T06:59:13.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:12 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1128740777' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1375953868"}]: dispatch 2026-03-10T06:59:13.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:12 vm02 ceph-mon[54377]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]': finished 2026-03-10T06:59:13.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:12 vm02 ceph-mon[54377]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T06:59:13.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:12 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1128740777' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1375953868"}]: dispatch 2026-03-10T06:59:13.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:12 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1019363220"}]': finished 2026-03-10T06:59:13.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:12 vm02 ceph-mon[50158]: osdmap e73: 8 total, 8 up, 8 in 2026-03-10T06:59:13.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:12 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1128740777' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1375953868"}]: dispatch 2026-03-10T06:59:14.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:13 vm05 ceph-mon[48591]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:14.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:13 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1128740777' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1375953868"}]': finished 2026-03-10T06:59:14.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:13 vm05 ceph-mon[48591]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T06:59:14.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:13 vm02 ceph-mon[54377]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:14.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:13 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1128740777' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1375953868"}]': finished 2026-03-10T06:59:14.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:13 vm02 ceph-mon[54377]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T06:59:14.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:13 vm02 ceph-mon[50158]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:14.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:13 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/1128740777' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1375953868"}]': finished 2026-03-10T06:59:14.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:13 vm02 ceph-mon[50158]: osdmap e74: 8 total, 8 up, 8 in 2026-03-10T06:59:15.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:15 vm05 ceph-mon[48591]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T06:59:15.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:15 vm02 ceph-mon[54377]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T06:59:15.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:15 vm02 ceph-mon[50158]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T06:59:16.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:59:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:59:17.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:17 vm05 ceph-mon[48591]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:17.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:17 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:17.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:17 vm02 ceph-mon[54377]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:17.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:17 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:17.782 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:17 vm02 ceph-mon[50158]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 96 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:17.782 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:17 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:18.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:59:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:17] "GET /metrics HTTP/1.1" 200 214512 "" "Prometheus/2.33.4" 2026-03-10T06:59:19.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:19 vm05 ceph-mon[48591]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T06:59:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:19 vm02 ceph-mon[54377]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T06:59:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:19 vm02 ceph-mon[50158]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 1 op/s 2026-03-10T06:59:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": 
"osd pg-upmap-items", "format": "json", "pgid": "2.f", "id": [7, 2]}]: dispatch 2026-03-10T06:59:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-10T06:59:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-10T06:59:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-10T06:59:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:59:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.f", "id": [7, 2]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.f", "id": [7, 2]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:20 vm02 
ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-10T06:59:20.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-10T06:59:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T06:59:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T06:59:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:21 vm05 ceph-mon[48591]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 612 B/s rd, 0 op/s 2026-03-10T06:59:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.f", "id": [7, 2]}]': finished 2026-03-10T06:59:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]': finished 2026-03-10T06:59:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-10T06:59:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]': finished 2026-03-10T06:59:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:21 vm05 ceph-mon[48591]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[54377]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 612 B/s rd, 0 op/s 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.f", "id": [7, 2]}]': finished 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]': finished 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", 
"pgid": "4.1f", "id": [1, 2]}]': finished 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[54377]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[50158]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 612 B/s rd, 0 op/s 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.f", "id": [7, 2]}]': finished 2026-03-10T06:59:21.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]': finished 2026-03-10T06:59:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-10T06:59:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]': finished 2026-03-10T06:59:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:21 vm02 ceph-mon[50158]: osdmap e75: 8 total, 8 up, 8 in 2026-03-10T06:59:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:22 vm05 ceph-mon[48591]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T06:59:22.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:22 vm02 ceph-mon[54377]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T06:59:22.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:22 vm02 ceph-mon[50158]: osdmap e76: 8 total, 8 up, 8 in 2026-03-10T06:59:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:23 vm05 ceph-mon[48591]: pgmap v67: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:23.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:23 vm02 ceph-mon[54377]: pgmap v67: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:23.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:23 vm02 ceph-mon[50158]: pgmap v67: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:25.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:25 vm05 ceph-mon[48591]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:25.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:25 vm02 ceph-mon[54377]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:25.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:25 vm02 ceph-mon[50158]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:26.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:59:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 
2026-03-10T06:59:27.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:27 vm02 ceph-mon[54377]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5 B/s, 0 objects/s recovering 2026-03-10T06:59:27.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:27 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:27.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:27 vm02 ceph-mon[50158]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5 B/s, 0 objects/s recovering 2026-03-10T06:59:27.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:27 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:27.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:27 vm05 ceph-mon[48591]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5 B/s, 0 objects/s recovering 2026-03-10T06:59:27.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:27 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:28.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:59:27 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:27] "GET /metrics HTTP/1.1" 200 214480 "" "Prometheus/2.33.4" 2026-03-10T06:59:29.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:29 vm02 ceph-mon[54377]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5 B/s, 0 objects/s recovering 2026-03-10T06:59:29.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:29 vm02 ceph-mon[50158]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5 B/s, 0 objects/s recovering 2026-03-10T06:59:29.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:29 vm05 ceph-mon[48591]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 5 B/s, 0 objects/s recovering 2026-03-10T06:59:31.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:31 vm05 ceph-mon[48591]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 4 B/s, 0 objects/s recovering 2026-03-10T06:59:31.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:31 vm02 ceph-mon[54377]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 4 B/s, 0 objects/s recovering 2026-03-10T06:59:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:31 vm02 ceph-mon[50158]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s; 4 B/s, 0 objects/s recovering 2026-03-10T06:59:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:33 vm05 ceph-mon[48591]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 942 B/s rd, 0 op/s; 4 B/s, 0 objects/s recovering 2026-03-10T06:59:33.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:33 vm02 ceph-mon[54377]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 942 B/s rd, 0 
op/s; 4 B/s, 0 objects/s recovering 2026-03-10T06:59:33.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:33 vm02 ceph-mon[50158]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 942 B/s rd, 0 op/s; 4 B/s, 0 objects/s recovering 2026-03-10T06:59:35.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:35 vm05 ceph-mon[48591]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 3 B/s, 0 objects/s recovering 2026-03-10T06:59:35.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:35 vm02 ceph-mon[54377]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 3 B/s, 0 objects/s recovering 2026-03-10T06:59:35.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:35 vm02 ceph-mon[50158]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 3 B/s, 0 objects/s recovering 2026-03-10T06:59:36.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:59:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:59:37.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:37 vm02 ceph-mon[54377]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 3 B/s, 0 objects/s recovering 2026-03-10T06:59:37.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:37 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:37 vm02 ceph-mon[50158]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 3 B/s, 0 objects/s recovering 2026-03-10T06:59:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:37 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:37 vm05 ceph-mon[48591]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 3 B/s, 0 objects/s recovering 2026-03-10T06:59:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:37 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:38.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:59:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:37] "GET /metrics HTTP/1.1" 200 214463 "" "Prometheus/2.33.4" 2026-03-10T06:59:39.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:39 vm02 ceph-mon[54377]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:39 vm02 ceph-mon[50158]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:39.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:39 vm05 ceph-mon[48591]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:41.584 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:41 vm02 ceph-mon[54377]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:41.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:41 vm02 ceph-mon[50158]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:41 vm05 ceph-mon[48591]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:43.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:43 vm02 ceph-mon[54377]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:43.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:43 vm02 ceph-mon[50158]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:43.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:43 vm05 ceph-mon[48591]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:45.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:45 vm02 ceph-mon[54377]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:45.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:45 vm02 ceph-mon[50158]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:45.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:45 vm05 ceph-mon[48591]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:46.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:59:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:59:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:47 vm05 ceph-mon[48591]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:47 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:47.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:47 vm02 ceph-mon[54377]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:47.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:47 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:47.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:47 vm02 ceph-mon[50158]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:47.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:47 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:48.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:59:47 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:47] "GET /metrics HTTP/1.1" 200 214463 "" "Prometheus/2.33.4" 2026-03-10T06:59:49.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:49 vm05 ceph-mon[48591]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:49.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:49 vm02 ceph-mon[54377]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:49.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:49 vm02 ceph-mon[50158]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:51.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:51 vm05 ceph-mon[48591]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:51.839 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:51 vm02 ceph-mon[54377]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:51.839 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:51 vm02 ceph-mon[50158]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:53 vm05 ceph-mon[48591]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:54.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:53 vm02 ceph-mon[54377]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:54.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:53 vm02 ceph-mon[50158]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:55 vm05 ceph-mon[48591]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:55 vm02 ceph-mon[54377]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:56.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:55 vm02 ceph-mon[50158]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T06:59:56.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 06:59:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:06:59:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T06:59:58.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:57 vm02 ceph-mon[54377]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:57 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:58.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 06:59:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - 
[10/Mar/2026:06:59:57] "GET /metrics HTTP/1.1" 200 214462 "" "Prometheus/2.33.4" 2026-03-10T06:59:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:57 vm02 ceph-mon[50158]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:57 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T06:59:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:57 vm05 ceph-mon[48591]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T06:59:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:57 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 06:59:59 vm05 ceph-mon[48591]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:00.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 06:59:59 vm02 ceph-mon[54377]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:00.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 06:59:59 vm02 ceph-mon[50158]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:01.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:01 vm05 ceph-mon[48591]: overall HEALTH_OK 2026-03-10T07:00:01.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:01 vm02 ceph-mon[54377]: overall HEALTH_OK 2026-03-10T07:00:01.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:01 vm02 ceph-mon[50158]: overall HEALTH_OK 2026-03-10T07:00:02.443 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:02 vm02 ceph-mon[50158]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:02.443 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:02 vm02 ceph-mon[54377]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:02.450 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:02 vm05 ceph-mon[48591]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:03 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:03 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:03 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:03 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:03 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:03 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:03 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:03 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:03 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:04.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:04 vm05 ceph-mon[48591]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:04.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:04 vm02 ceph-mon[54377]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:04.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:04 vm02 ceph-mon[50158]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:06.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:00:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:00:06.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:06 vm05 ceph-mon[48591]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:06.514 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:06 vm02 ceph-mon[54377]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:06.514 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:06 vm02 ceph-mon[50158]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:08.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:00:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:07] "GET /metrics HTTP/1.1" 200 214444 "" "Prometheus/2.33.4" 2026-03-10T07:00:08.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:08 vm05 ceph-mon[48591]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:08.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:08 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:08.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:08 vm02 ceph-mon[54377]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:08.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:08 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:08.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 
07:00:08 vm02 ceph-mon[50158]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:08.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:08 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:10.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:10 vm05 ceph-mon[48591]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:10 vm02 ceph-mon[54377]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:10.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:10 vm02 ceph-mon[50158]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:12.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:12 vm05 ceph-mon[48591]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:12.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:12 vm02 ceph-mon[54377]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:12.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:12 vm02 ceph-mon[50158]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:13.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:13.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": 
x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:14 vm05 ceph-mon[48591]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:14.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:14 vm02 ceph-mon[54377]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:14.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:14 vm02 ceph-mon[50158]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:15.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:15 vm02 ceph-mon[54377]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:15.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:15 vm02 ceph-mon[50158]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:15.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:15 vm05 ceph-mon[48591]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:16.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:00:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:00:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:17 vm02 ceph-mon[54377]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:17 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:17 vm02 ceph-mon[50158]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:17 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:17.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:17 vm05 ceph-mon[48591]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:17.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:17 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:18.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:00:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:17] "GET /metrics HTTP/1.1" 200 214444 "" "Prometheus/2.33.4" 2026-03-10T07:00:19.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:19 vm02 ceph-mon[54377]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:19.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:19 vm02 ceph-mon[50158]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB 
avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:19.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:19 vm05 ceph-mon[48591]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:20.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:00:20.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:00:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:00:21.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:21 vm02 ceph-mon[54377]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:21.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:21 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:00:21.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:21 vm02 ceph-mon[50158]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:21.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:21 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:00:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:21 vm05 ceph-mon[48591]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:21.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:21 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:00:23.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:23 vm02 ceph-mon[54377]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:23.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:23.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:23.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: 
level=error ts=2026-03-10T07:00:23.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:23.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:23.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:23.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:23.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:23.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:23.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:23.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:23.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:23.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:23 vm02 ceph-mon[50158]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:23.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:23 vm05 ceph-mon[48591]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:25.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:25 vm02 ceph-mon[54377]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:25.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:25 vm02 ceph-mon[50158]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:25.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:25 vm05 ceph-mon[48591]: pgmap 
v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:26.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:00:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:00:27.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:27 vm05 ceph-mon[48591]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:27.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:27 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:27.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:27 vm02 ceph-mon[54377]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:27.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:27 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:27.782 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:27 vm02 ceph-mon[50158]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:27.782 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:27 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:28.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:00:27 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:27] "GET /metrics HTTP/1.1" 200 214460 "" "Prometheus/2.33.4" 2026-03-10T07:00:29.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:29 vm05 ceph-mon[48591]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:29.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:29 vm02 ceph-mon[54377]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:29.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:29 vm02 ceph-mon[50158]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:31.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:31 vm05 ceph-mon[48591]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:31.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:31 vm02 ceph-mon[54377]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:31 vm02 ceph-mon[50158]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:33 vm05 ceph-mon[48591]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:33.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:33 vm02 ceph-mon[54377]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 
GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:33.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:33.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:33.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:33.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:33.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:33.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:33.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:33.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 
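The Alertmanager errors repeated above (and roughly every ten seconds below) all have the same cause: the ceph-dashboard webhook receiver posts alerts to the mgr endpoints at https://192.168.123.102:8443 and https://192.168.123.105:8443, and the dashboard's self-signed certificate carries no IP subjectAltName entries, so validation against a bare IP fails (note also the doubled slash in webhook[0]'s URL, .../8443//api/prometheus_receiver, although the reported failure is the certificate check). These warnings do not appear to gate the upgrade test. For reference only, this is an illustrative sketch rather than anything this job runs, and the file names and CN are assumptions: a certificate that would pass the check could be generated with the mgr IPs in its SAN list:

    # illustrative sketch (not executed by this teuthology job):
    # self-signed cert whose SANs include the mgr IPs, so clients that
    # validate by IP address (like the Alertmanager webhook) accept it
    openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
      -keyout dashboard.key -out dashboard.crt \
      -subj "/CN=ceph-dashboard" \
      -addext "subjectAltName=IP:192.168.123.102,IP:192.168.123.105"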
2026-03-10T07:00:33.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:33 vm02 ceph-mon[50158]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:35 vm05 ceph-mon[48591]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:35.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:35 vm02 ceph-mon[54377]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:35 vm02 ceph-mon[50158]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:36.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:00:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:00:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:37 vm05 ceph-mon[48591]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:37 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:37.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:37 vm02 ceph-mon[54377]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:37.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:37 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:37.782 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:37 vm02 ceph-mon[50158]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:37.782 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:37 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:38.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:00:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:37] "GET /metrics HTTP/1.1" 200 214483 "" "Prometheus/2.33.4" 2026-03-10T07:00:39.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:39 vm05 ceph-mon[48591]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:39.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:39 vm02 ceph-mon[54377]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:39.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:39 vm02 ceph-mon[50158]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:41 vm05 ceph-mon[48591]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:41.834 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:41 vm02 ceph-mon[54377]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:41.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:41 vm02 ceph-mon[50158]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:43.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:43 vm05 ceph-mon[48591]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:43.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:43 vm02 ceph-mon[54377]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:43.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:43.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:43.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:43.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:43.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:43.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:43.834 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:43.509Z caller=notify.go:724 component=dispatcher 
receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:43.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:43.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:43 vm02 ceph-mon[50158]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:45.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:45 vm05 ceph-mon[48591]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:45.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:45 vm02 ceph-mon[54377]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:45.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:45 vm02 ceph-mon[50158]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:46.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:00:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:00:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:47 vm05 ceph-mon[48591]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:47 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:47.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:47 vm02 ceph-mon[54377]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:47.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:47 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:47.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:47 vm02 ceph-mon[50158]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:47.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:47 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:48.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:00:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:47] "GET /metrics HTTP/1.1" 200 214483 "" "Prometheus/2.33.4" 2026-03-10T07:00:49.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:49 vm05 ceph-mon[48591]: pgmap v110: 161 pgs: 161 
active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:49.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:49 vm02 ceph-mon[54377]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:49.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:49 vm02 ceph-mon[50158]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:52.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:51 vm05 ceph-mon[48591]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:52.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:51 vm02 ceph-mon[54377]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:52.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:51 vm02 ceph-mon[50158]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:53.320 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force' 2026-03-10T07:00:53.536 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:53.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:53.536 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:00:53.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:53.536 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:53.536 
INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:53.536 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:53.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:00:53.536 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:00:53.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:00:53.810 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:53 vm02 ceph-mon[50158]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:53.810 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:53 vm02 ceph-mon[54377]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:53.847 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force' 2026-03-10T07:00:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:53 vm05 ceph-mon[48591]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:54.372 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set global log_to_journald false --force' 2026-03-10T07:00:54.876 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-10T07:00:55.355 INFO:teuthology.orchestra.run.vm02.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:00:55.416 INFO:teuthology.run_tasks:Running task cephadm.shell... 
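At this point the upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df has been initiated, and the cephadm.shell task that starts below polls the orchestrator until the upgrade finishes or reports an error. The command in the log is a single heavily shell-escaped line; unescaped for readability, the loop it runs is equivalent to:

    # same loop as the quoted one-liner executed on vm02 below:
    # keep polling while the upgrade is in progress and no Error message
    # has been reported, dumping daemon/version/health state every 30s
    while ceph orch upgrade status | jq '.in_progress' | grep true \
          && ! ceph orch upgrade status | jq '.message' | grep Error; do
        ceph orch ps
        ceph versions
        ceph orch upgrade status
        ceph health detail
        sleep 30
    done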
2026-03-10T07:00:55.418 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm02.local 2026-03-10T07:00:55.418 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done' 2026-03-10T07:00:55.917 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[50158]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[54377]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:55.918 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:55 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:55 vm05 ceph-mon[48591]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:00:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:00:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:00:56.003 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:55 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y'
2026-03-10T07:00:56.283 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (118s) 114s ago 2m 14.6M - ba2b418f427c 519616b4f7a0
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (2m) 114s ago 2m 46.8M - 8.3.5 dad864ee21e9 81ae9caed32e
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (2m) 114s ago 2m 67.0M - 3.5 e1d6a67b021e 204f70705475
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443 running (3m) 114s ago 3m 417M - 17.2.0 e1d6a67b021e 98ac9e4d7049
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:9283 running (4m) 114s ago 4m 463M - 17.2.0 e1d6a67b021e 1e8c9dfeac74
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (4m) 114s ago 4m 51.9M 2048M 17.2.0 e1d6a67b021e 16a124fbc55b
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (4m) 114s ago 4m 48.7M 2048M 17.2.0 e1d6a67b021e c223c571b7ce
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (4m) 114s ago 4m 49.6M 2048M 17.2.0 e1d6a67b021e d42e67599bdc
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (2m) 114s ago 2m 13.5M - 1dbe0e931976 79b6fdafea6a
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (2m) 114s ago 2m 15.0M - 1dbe0e931976 4912deba3ea0
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (3m) 114s ago 3m 47.7M 4096M 17.2.0 e1d6a67b021e 0e972212a251
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (3m) 114s ago 3m 52.3M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (3m) 114s ago 3m 45.2M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (3m) 114s ago 3m 45.6M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (3m) 114s ago 3m 48.8M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (3m) 114s ago 3m 46.7M 4096M 17.2.0 e1d6a67b021e ef1a70593f89
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (2m) 114s ago 2m 48.8M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (2m) 114s ago 2m 47.3M 4096M 17.2.0 e1d6a67b021e 83d454094982
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (117s) 114s ago 2m 34.5M - 514e6a882f6e 354a9ecb0815
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (2m) 114s ago 2m 91.0M - 17.2.0 e1d6a67b021e ef903c439808
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (2m) 114s ago 2m 90.8M - 17.2.0 e1d6a67b021e acd35f4810d9
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (2m) 114s ago 2m 89.1M - 17.2.0 e1d6a67b021e 6c68381e5378
2026-03-10T07:00:56.284 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (2m) 114s ago 2m 91.0M - 17.2.0 e1d6a67b021e 62a81876b05e
2026-03-10T07:00:56.500 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "mon": {
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": {
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "osd": {
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "mds": {},
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": {
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "overall": {
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T07:00:56.501 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true,
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [],
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "",
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image"
2026-03-10T07:00:56.697 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:00:56.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:00:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-10T07:00:57.161 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T07:00:57.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:57 vm05 ceph-mon[48591]: from='client.14910 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:00:57.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:57 vm05 ceph-mon[48591]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T07:00:57.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:57 
vm05 ceph-mon[48591]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:00:57.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:57 vm05 ceph-mon[48591]: from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:57 vm05 ceph-mon[48591]: from='client.14922 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:57 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/960743290' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[54377]: from='client.14910 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[54377]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[54377]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[54377]: from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[54377]: from='client.14922 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/960743290' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[50158]: from='client.14910 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[50158]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[50158]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[50158]: from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[50158]: from='client.14922 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:57 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/960743290' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:00:58.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:00:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:00:57] "GET /metrics HTTP/1.1" 200 214488 "" "Prometheus/2.33.4" 2026-03-10T07:00:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:58 vm05 ceph-mon[48591]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:58 vm05 ceph-mon[48591]: from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:58 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:58 vm05 ceph-mon[48591]: from='client.14937 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:58 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1573099981' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[50158]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[50158]: from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[50158]: from='client.14937 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1573099981' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[54377]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[54377]: from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[54377]: from='client.14937 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:00:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:58 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/1573099981' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:00:59.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:00:59 vm05 ceph-mon[48591]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:59.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:00:59 vm02 ceph-mon[54377]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:00:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:00:59 vm02 ceph-mon[50158]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:01 vm02 ceph-mon[54377]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:01 vm02 ceph-mon[50158]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:01.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:01 vm05 ceph-mon[48591]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:03 vm05 ceph-mon[48591]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:03 vm02 ceph-mon[54377]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:03.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:03.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard 
integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:03.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:03.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:03 vm02 ceph-mon[50158]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:05.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:05 vm05 ceph-mon[48591]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:05.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:05 vm02 ceph-mon[50158]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:05.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:05 vm02 ceph-mon[54377]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:06.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:01:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:01:07.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:07 vm05 ceph-mon[48591]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:07.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:07 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:07.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:07 vm02 ceph-mon[54377]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:07.783 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:07 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:07.783 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:01:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:07] "GET /metrics HTTP/1.1" 200 214477 "" "Prometheus/2.33.4" 2026-03-10T07:01:07.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:07 vm02 ceph-mon[50158]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:07.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:07 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:10.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:10 vm05 ceph-mon[48591]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:10 vm02 ceph-mon[50158]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:10.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:10 vm02 ceph-mon[54377]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:12.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:12 vm05 ceph-mon[48591]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:12.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:12 vm02 ceph-mon[54377]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:12.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:12 vm02 ceph-mon[50158]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:13.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:13.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot 
validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:13.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:14 vm05 ceph-mon[48591]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:14.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:14 vm02 ceph-mon[54377]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:14.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:14 vm02 ceph-mon[50158]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:16.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:01:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:01:16.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:16 vm05 ceph-mon[48591]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:16.582 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:16 vm02 ceph-mon[54377]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:16.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:16 vm02 ceph-mon[50158]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB 
data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:18.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:01:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:17] "GET /metrics HTTP/1.1" 200 214477 "" "Prometheus/2.33.4" 2026-03-10T07:01:18.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:18 vm05 ceph-mon[48591]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:18.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:18 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:18.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:18 vm02 ceph-mon[54377]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:18.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:18 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:18 vm02 ceph-mon[50158]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:18 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:20.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:20 vm05 ceph-mon[48591]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:20.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:20 vm02 ceph-mon[54377]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:20.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:20 vm02 ceph-mon[50158]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:01:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:22 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:01:22.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:01:22.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:22 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:01:22.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:01:22.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:22 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:01:23.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-mon[50158]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:23.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-mon[50158]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:23.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:23 vm02 ceph-mon[54377]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:23.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:23 vm02 ceph-mon[54377]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:23.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:23.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:23.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:23.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:23.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:23 vm05 ceph-mon[48591]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:23 vm05 ceph-mon[48591]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:25 vm05 ceph-mon[48591]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:26.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:25 vm02 ceph-mon[50158]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:26.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:25 vm02 ceph-mon[54377]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:26.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:01:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:01:27.427 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (2m) 2m ago 2m 14.6M - ba2b418f427c 519616b4f7a0 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (2m) 2m ago 2m 46.8M - 8.3.5 dad864ee21e9 81ae9caed32e 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (2m) 2m ago 2m 67.0M - 3.5 e1d6a67b021e 204f70705475 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443 running (4m) 2m ago 4m 417M - 17.2.0 e1d6a67b021e 98ac9e4d7049 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:9283 running (5m) 2m ago 5m 463M - 17.2.0 e1d6a67b021e 1e8c9dfeac74 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (5m) 2m ago 5m 51.9M 2048M 
17.2.0 e1d6a67b021e 16a124fbc55b 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (4m) 2m ago 4m 48.7M 2048M 17.2.0 e1d6a67b021e c223c571b7ce 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (4m) 2m ago 4m 49.6M 2048M 17.2.0 e1d6a67b021e d42e67599bdc 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (3m) 2m ago 3m 13.5M - 1dbe0e931976 79b6fdafea6a 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (3m) 2m ago 3m 15.0M - 1dbe0e931976 4912deba3ea0 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (4m) 2m ago 4m 47.7M 4096M 17.2.0 e1d6a67b021e 0e972212a251 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (4m) 2m ago 4m 52.3M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (4m) 2m ago 4m 45.2M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (3m) 2m ago 3m 45.6M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (3m) 2m ago 3m 48.8M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (3m) 2m ago 3m 46.7M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (3m) 2m ago 3m 48.8M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (3m) 2m ago 3m 47.3M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (2m) 2m ago 2m 34.5M - 514e6a882f6e 354a9ecb0815 2026-03-10T07:01:27.831 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (2m) 2m ago 2m 91.0M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:01:27.832 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (2m) 2m ago 2m 90.8M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:01:27.832 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (2m) 2m ago 2m 89.1M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:01:27.832 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (2m) 2m ago 2m 91.0M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:01:28.059 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:01:27 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:27] "GET /metrics HTTP/1.1" 200 214452 "" "Prometheus/2.33.4" 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:28.060 
INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "mds": {}, 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:01:28.060 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[50158]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[54377]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:01:28.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:28 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [], 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "0/23 daemons upgraded", 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout: 
"message": "Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image on host vm05" 2026-03-10T07:01:28.342 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:01:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:28 vm05 ceph-mon[48591]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:28 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:28 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:01:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:28 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:01:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:28 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:01:28.563 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown) 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: from='client.24707 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm05 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: from='client.24799 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: from='client.24805 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/440552528' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:01:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:29 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/1471064628' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:01:29.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown) 2026-03-10T07:01:29.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: from='client.24707 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm05 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: from='client.24799 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: from='client.24805 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/440552528' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/1471064628' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown) 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: from='client.24707 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm05 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: from='client.24799 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: from='client.24805 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/440552528' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:01:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:29 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/1471064628' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:01:30.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:30 vm05 ceph-mon[48591]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:30.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:30 vm05 ceph-mon[48591]: from='client.24817 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:30.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:30 vm02 ceph-mon[54377]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:30.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:30 vm02 ceph-mon[54377]: from='client.24817 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:30.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:30 vm02 ceph-mon[50158]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:30.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:30 vm02 ceph-mon[50158]: from='client.24817 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:32 vm05 ceph-mon[48591]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:32 vm02 ceph-mon[54377]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:32 vm02 ceph-mon[50158]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:33 vm05 ceph-mon[48591]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:33.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:33 vm02 ceph-mon[54377]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:33.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:33.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 
attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:33.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:33.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:33.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:33.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:33.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:33 vm02 ceph-mon[50158]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:35 vm05 ceph-mon[48591]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:35.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:35 vm02 ceph-mon[54377]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:35 vm02 ceph-mon[50158]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:36.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:01:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:36] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:01:37.753 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:37 vm05 ceph-mon[48591]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:37 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:37.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:37 vm02 ceph-mon[54377]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:37.783 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:37 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:37.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:37 vm02 ceph-mon[50158]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:37.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:37 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:38.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:01:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:37] "GET /metrics HTTP/1.1" 200 214415 "" "Prometheus/2.33.4" 2026-03-10T07:01:39.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:39 vm05 ceph-mon[48591]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:39.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:39 vm02 ceph-mon[54377]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:39.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:39 vm02 ceph-mon[50158]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:41 vm05 ceph-mon[48591]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:41.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:41 vm02 ceph-mon[54377]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:41 vm02 ceph-mon[50158]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:43.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:43 vm05 ceph-mon[48591]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:43.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:43 vm02 ceph-mon[54377]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:43.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify 
retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:43.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:43.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:43.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:43.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:43.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:43.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:43.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:43 vm02 ceph-mon[50158]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:45.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:45 vm05 ceph-mon[48591]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:45.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:45 vm02 ceph-mon[54377]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:45.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:45 vm02 ceph-mon[50158]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:46.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:01:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:01:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:47 vm05 ceph-mon[48591]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:47 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:47.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:47 vm02 ceph-mon[54377]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:47.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:47 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:47.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:47 vm02 ceph-mon[50158]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:47.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:47 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:48.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:01:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:47] "GET /metrics HTTP/1.1" 200 214415 "" "Prometheus/2.33.4" 2026-03-10T07:01:49.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:49 vm05 ceph-mon[48591]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:49.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:49 vm02 ceph-mon[54377]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:49.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:49 vm02 ceph-mon[50158]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:51.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:51 vm05 ceph-mon[48591]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:51.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:51 vm02 ceph-mon[54377]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:51.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:51 vm02 ceph-mon[50158]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T07:01:53.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:53 vm05 ceph-mon[48591]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:53.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:53 vm02 ceph-mon[54377]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:53.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:53.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:53.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:01:53.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:53.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:53.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:53.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:53.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:53.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:53 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:53.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:01:53.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:01:53 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:01:53.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:01:53.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:53 vm02 ceph-mon[50158]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:55.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:55 vm05 ceph-mon[48591]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:55.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:55 vm02 ceph-mon[54377]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:55.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:55 vm02 ceph-mon[50158]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:56.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:01:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:01:57.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:57 vm05 ceph-mon[48591]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:57.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:57 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:57.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:57 vm02 ceph-mon[54377]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:57.782 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:57 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:57.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:57 vm02 ceph-mon[50158]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:01:57.783 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:57 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:01:58.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:01:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:01:57] "GET /metrics HTTP/1.1" 200 214414 "" "Prometheus/2.33.4" 2026-03-10T07:01:58.763 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (3m) 2m ago 3m 14.6M - ba2b418f427c 519616b4f7a0 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (3m) 2m ago 3m 46.8M - 
8.3.5 dad864ee21e9 81ae9caed32e 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (3m) 2m ago 3m 67.0M - 3.5 e1d6a67b021e 204f70705475 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443 running (4m) 2m ago 4m 417M - 17.2.0 e1d6a67b021e 98ac9e4d7049 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:9283 running (5m) 2m ago 5m 463M - 17.2.0 e1d6a67b021e 1e8c9dfeac74 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (5m) 2m ago 5m 51.9M 2048M 17.2.0 e1d6a67b021e 16a124fbc55b 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (5m) 2m ago 5m 48.7M 2048M 17.2.0 e1d6a67b021e c223c571b7ce 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (5m) 2m ago 5m 49.6M 2048M 17.2.0 e1d6a67b021e d42e67599bdc 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (3m) 2m ago 3m 13.5M - 1dbe0e931976 79b6fdafea6a 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (3m) 2m ago 3m 15.0M - 1dbe0e931976 4912deba3ea0 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (4m) 2m ago 4m 47.7M 4096M 17.2.0 e1d6a67b021e 0e972212a251 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (4m) 2m ago 4m 52.3M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (4m) 2m ago 4m 45.2M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (4m) 2m ago 4m 45.6M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (4m) 2m ago 4m 48.8M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (4m) 2m ago 4m 46.7M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (4m) 2m ago 4m 48.8M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (3m) 2m ago 3m 47.3M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (3m) 2m ago 3m 34.5M - 514e6a882f6e 354a9ecb0815 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (3m) 2m ago 3m 91.0M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (3m) 2m ago 3m 90.8M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (3m) 2m ago 3m 89.1M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:01:59.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (3m) 2m ago 3m 91.0M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:01:59.347 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: }, 
2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "mds": {}, 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:01:59.348 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:01:59.545 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:01:59.545 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:01:59.545 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:01:59.546 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [], 2026-03-10T07:01:59.546 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "0/23 daemons upgraded", 2026-03-10T07:01:59.546 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image on host vm05" 2026-03-10T07:01:59.546 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:01:59.759 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:01:59.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[54377]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:59.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[54377]: from='client.24829 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:59.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[54377]: from='client.14985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:59.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[54377]: from='client.24841 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:59.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2832667243' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/31053701' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[50158]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[50158]: from='client.24829 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[50158]: from='client.14985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[50158]: from='client.24841 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2832667243' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:01:59.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:01:59 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/31053701' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:02:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:59 vm05 ceph-mon[48591]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:59 vm05 ceph-mon[48591]: from='client.24829 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:59 vm05 ceph-mon[48591]: from='client.14985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:59 vm05 ceph-mon[48591]: from='client.24841 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:59 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2832667243' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:02:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:01:59 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/31053701' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:02:01.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:00 vm02 ceph-mon[54377]: from='client.24853 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:01.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:00 vm02 ceph-mon[50158]: from='client.24853 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:01.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:00 vm05 ceph-mon[48591]: from='client.24853 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:02.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:01 vm02 ceph-mon[54377]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:02.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:01 vm02 ceph-mon[50158]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:02.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:01 vm05 ceph-mon[48591]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:03.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:03.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: 
level=warn ts=2026-03-10T07:02:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:03.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:03.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:03.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:04.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:03 vm05 ceph-mon[48591]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:04.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:03 vm02 ceph-mon[54377]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:04.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:03 vm02 ceph-mon[50158]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:06.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:05 vm05 ceph-mon[48591]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:06.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:05 vm02 ceph-mon[54377]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:06.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:05 vm02 ceph-mon[50158]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:06.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:06] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:02:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:07 vm02 ceph-mon[54377]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:08.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:07 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:08.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:07] "GET /metrics HTTP/1.1" 200 214418 "" 
"Prometheus/2.33.4" 2026-03-10T07:02:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:07 vm02 ceph-mon[50158]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:07 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:07 vm05 ceph-mon[48591]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:08.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:07 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:10.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:09 vm05 ceph-mon[48591]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:10.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:09 vm02 ceph-mon[54377]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:10.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:09 vm02 ceph-mon[50158]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:12.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:11 vm05 ceph-mon[48591]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:12.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:11 vm02 ceph-mon[54377]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:12.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:11 vm02 ceph-mon[50158]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:13.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:13.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: 
cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:13.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:13.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:13.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:14.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:13 vm05 ceph-mon[48591]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:14.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:13 vm02 ceph-mon[54377]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:14.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:13 vm02 ceph-mon[50158]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:16.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:15 vm05 ceph-mon[48591]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:16.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:15 vm02 ceph-mon[54377]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:16.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:15 vm02 ceph-mon[50158]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:16.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - 
[10/Mar/2026:07:02:16] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:02:18.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:17 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:17] "GET /metrics HTTP/1.1" 200 214418 "" "Prometheus/2.33.4" 2026-03-10T07:02:18.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:17 vm02 ceph-mon[54377]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:18.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:17 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:18.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:17 vm02 ceph-mon[50158]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:18.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:17 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:18.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:17 vm05 ceph-mon[48591]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:18.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:17 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:20.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:19 vm05 ceph-mon[48591]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:20.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:19 vm02 ceph-mon[54377]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:20.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:19 vm02 ceph-mon[50158]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:21.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:21.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:20 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:21.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:21.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:20 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:21.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:21.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:20 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:22.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:21 vm05 ceph-mon[48591]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:22.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:21 vm02 ceph-mon[54377]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:22.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:21 vm02 ceph-mon[50158]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:23.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:23.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": context deadline exceeded" 2026-03-10T07:02:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:23 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:23.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:23.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:23.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:24.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:24 vm02 ceph-mon[54377]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:24.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:24 vm02 ceph-mon[50158]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:24.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:24 vm05 ceph-mon[48591]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:26.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:26 vm02 ceph-mon[54377]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:26.334 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:26 vm02 ceph-mon[50158]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:26.392 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:26 vm05 ceph-mon[48591]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:26.393 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[49719]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:26] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:02:28.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:28 vm02 ceph-mon[54377]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:28.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:28 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:28.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:27 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:27] "GET /metrics HTTP/1.1" 200 214421 "" "Prometheus/2.33.4" 2026-03-10T07:02:28.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:28 vm02 ceph-mon[50158]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:28.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:28 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-10T07:02:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:28 vm05 ceph-mon[48591]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:28.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:28 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:29.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:29 vm05 ceph-mon[48591]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:29.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:29 vm02 ceph-mon[50158]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:29.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:29 vm02 ceph-mon[54377]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:29.963 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:02:30.360 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (3m) 3m ago 3m 14.6M - ba2b418f427c 519616b4f7a0 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (3m) 3m ago 3m 46.8M - 8.3.5 dad864ee21e9 81ae9caed32e 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (3m) 3m ago 3m 67.0M - 3.5 e1d6a67b021e 204f70705475 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443 running (5m) 3m ago 5m 417M - 17.2.0 e1d6a67b021e 98ac9e4d7049 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:9283 running (6m) 3m ago 6m 463M - 17.2.0 e1d6a67b021e 1e8c9dfeac74 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (6m) 3m ago 6m 51.9M 2048M 17.2.0 e1d6a67b021e 16a124fbc55b 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (5m) 3m ago 5m 48.7M 2048M 17.2.0 e1d6a67b021e c223c571b7ce 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (5m) 3m ago 5m 49.6M 2048M 17.2.0 e1d6a67b021e d42e67599bdc 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (4m) 3m ago 4m 13.5M - 1dbe0e931976 79b6fdafea6a 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (4m) 3m ago 4m 15.0M - 1dbe0e931976 4912deba3ea0 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (5m) 3m ago 5m 47.7M 4096M 17.2.0 e1d6a67b021e 0e972212a251 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (5m) 3m ago 5m 52.3M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (5m) 3m ago 5m 45.2M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (4m) 3m ago 4m 45.6M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (4m) 3m ago 4m 48.8M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:02:30.361 
INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (4m) 3m ago 4m 46.7M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (4m) 3m ago 4m 48.8M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (4m) 3m ago 4m 47.3M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (3m) 3m ago 3m 34.5M - 514e6a882f6e 354a9ecb0815 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (3m) 3m ago 3m 91.0M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (3m) 3m ago 3m 90.8M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (3m) 3m ago 3m 89.1M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:02:30.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (3m) 3m ago 3m 91.0M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:02:30.593 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:02:30.593 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "mds": {}, 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:02:30.594 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [], 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "0/23 daemons upgraded", 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Pulling 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image on host vm05" 2026-03-10T07:02:30.787 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:02:31.011 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:02:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:30 vm02 ceph-mon[50158]: from='client.24865 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:31.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:30 vm02 ceph-mon[50158]: from='client.15018 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:30 vm02 ceph-mon[54377]: from='client.24865 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:31.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:30 vm02 ceph-mon[54377]: from='client.15018 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:31.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:30 vm05 ceph-mon[48591]: from='client.24865 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:31.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:30 vm05 ceph-mon[48591]: from='client.15018 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:32 vm05 ceph-mon[48591]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:32.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:32 vm05 ceph-mon[48591]: from='client.24877 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:32 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2582695098' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:02:32.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:32 vm05 ceph-mon[48591]: from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:32 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1993392988' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:02:32.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[54377]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:32.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[54377]: from='client.24877 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/2582695098' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:02:32.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[54377]: from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1993392988' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:02:32.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[50158]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:32.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[50158]: from='client.24877 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2582695098' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:02:32.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[50158]: from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:02:32.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:31 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1993392988' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:02:33.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:33 vm02 ceph-mon[54377]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:33.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:33.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:33.518Z caller=notify.go:724 component=dispatcher 
receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:33.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:33.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:33.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:33.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:33 vm02 ceph-mon[50158]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:33.864 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:33 vm05 ceph-mon[48591]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:34.682 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:34 vm05 systemd[1]: Stopping Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:02:34.682 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:34 vm05 bash[68144]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr.x" found: no such container 2026-03-10T07:02:35.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:34 vm05 podman[68151]: 2026-03-10 07:02:34.678226966 +0000 UTC m=+0.096753694 container died 98ac9e4d704993a0c2b773b544e8474d0a613d1d42e72e294f58c534134138d1 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, GIT_BRANCH=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, io.openshift.tags=base centos centos-stream, architecture=x86_64, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, GIT_CLEAN=True, name=centos-stream, version=8, distribution-scope=public, io.openshift.expose-services=) 2026-03-10T07:02:35.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:34 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:34.808Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:35 vm05 ceph-mon[48591]: Upgrade: Updating mgr.x 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:35 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:35 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:35 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:35 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:35 vm05 ceph-mon[48591]: Deploying daemon mgr.x on vm05 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 podman[68151]: 2026-03-10 07:02:35.171384569 +0000 UTC m=+0.589911297 container remove 98ac9e4d704993a0c2b773b544e8474d0a613d1d42e72e294f58c534134138d1 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, version=8, RELEASE=HEAD, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, GIT_BRANCH=HEAD, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, io.openshift.expose-services=, ceph=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, com.redhat.component=centos-stream-container) 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 bash[68151]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 bash[68169]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr.x" found: no such container 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 systemd[1]: Stopped Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Consumed 10.846s CPU time. 2026-03-10T07:02:35.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 systemd[1]: Starting Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:02:35.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[54377]: Upgrade: Updating mgr.x 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[54377]: Deploying daemon mgr.x on vm05 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[50158]: Upgrade: Updating mgr.x 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:35.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:35 vm02 ceph-mon[50158]: Deploying daemon mgr.x on vm05 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 podman[68253]: 2026-03-10 07:02:35.517346185 +0000 UTC m=+0.025599114 container create 1fbb0bd98b142a7bce6436903745d28ac7c2ca57c8f216b2e99f0b9863f5b87c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 podman[68253]: 2026-03-10 07:02:35.556660665 +0000 UTC m=+0.064913605 container init 1fbb0bd98b142a7bce6436903745d28ac7c2ca57c8f216b2e99f0b9863f5b87c 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 podman[68253]: 2026-03-10 07:02:35.559675495 +0000 UTC m=+0.067928424 container start 1fbb0bd98b142a7bce6436903745d28ac7c2ca57c8f216b2e99f0b9863f5b87c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 bash[68253]: 1fbb0bd98b142a7bce6436903745d28ac7c2ca57c8f216b2e99f0b9863f5b87c 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 podman[68253]: 2026-03-10 07:02:35.503404087 +0000 UTC m=+0.011657026 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 systemd[1]: Started Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:35.678+0000 7fe5c187c140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:02:35.994 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:35 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:35.723+0000 7fe5c187c140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:02:36.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:36 vm05 ceph-mon[48591]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:36.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:36.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:36.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:36 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:36.256 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:36.181+0000 7fe5c187c140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:02:36.583 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:36.582+0000 7fe5c187c140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[54377]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:36.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:36 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:36.145Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[50158]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[50158]: 
from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:36.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:36 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:36.851 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T07:02:36.851 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-10T07:02:36.851 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: from numpy import show_config as show_numpy_config 2026-03-10T07:02:36.851 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:36.680+0000 7fe5c187c140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:02:36.851 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:36.720+0000 7fe5c187c140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:02:36.851 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:36.797+0000 7fe5c187c140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T07:02:37.440 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:37 vm05 ceph-mon[48591]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:37.440 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:37 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:37.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:37 vm02 ceph-mon[54377]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:37 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:37 vm02 ceph-mon[50158]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:02:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:37 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:37.701 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.439+0000 7fe5c187c140 -1 
mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:02:37.701 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.571+0000 7fe5c187c140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:02:37.701 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.617+0000 7fe5c187c140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T07:02:37.701 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.655+0000 7fe5c187c140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:02:37.955 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.700+0000 7fe5c187c140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:02:37.955 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.741+0000 7fe5c187c140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T07:02:37.955 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:37.943+0000 7fe5c187c140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:02:38.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:37 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:37] "GET /metrics HTTP/1.1" 200 214424 "" "Prometheus/2.33.4" 2026-03-10T07:02:38.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.010+0000 7fe5c187c140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:02:38.567 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.278+0000 7fe5c187c140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T07:02:38.829 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.654+0000 7fe5c187c140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:02:38.829 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.701+0000 7fe5c187c140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:02:38.829 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.750+0000 7fe5c187c140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:02:38.830 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:38 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:38.830 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:38 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:38 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:38 vm02 
ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:38 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:38 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:39.179 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.861+0000 7fe5c187c140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:02:39.179 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:38.902+0000 7fe5c187c140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T07:02:39.179 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:39.048+0000 7fe5c187c140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:39.178+0000 7fe5c187c140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:39.332+0000 7fe5c187c140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:39.379+0000 7fe5c187c140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:39] ENGINE Bus STARTING 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: CherryPy Checker: 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: The Application mounted at '' has an empty config. 
2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:39] ENGINE Serving on http://:::9283 2026-03-10T07:02:39.488 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:39] ENGINE Bus STARTED 2026-03-10T07:02:39.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:39 vm05 ceph-mon[48591]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:40.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:39 vm02 ceph-mon[54377]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:40.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:39 vm02 ceph-mon[50158]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:02:40.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:40.325Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:40.585 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:40.337Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:40 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:40] ENGINE Bus STOPPING 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: Standby manager daemon x restarted 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: Standby manager daemon x started 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:02:40.992 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: Failing over to other MGR 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 
192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-10T07:02:40.993 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:40 vm05 ceph-mon[48591]: mgrmap e21: x(active, starting, since 0.874285s) 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: Standby manager daemon x restarted 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: Standby manager daemon x started 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: Failing over to other MGR 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:02:41.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 
192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[54377]: mgrmap e21: x(active, starting, since 0.874285s) 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ignoring --setuser ceph since I am not root 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ignoring --setgroup ceph since I am not root 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:40.736+0000 7faf739ba000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:40.801+0000 7faf739ba000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: Standby manager daemon x restarted 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: Standby manager daemon x started 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.? 
192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: Failing over to other MGR 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: osdmap e77: 8 total, 8 up, 8 in 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:02:41.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 
192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: from='mgr.14415 192.168.123.102:0/4011379882' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-10T07:02:41.087 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:40 vm02 ceph-mon[50158]: mgrmap e21: x(active, starting, since 0.874285s) 2026-03-10T07:02:41.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:40 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:40] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:02:41.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:40 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:40] ENGINE Bus STOPPED 2026-03-10T07:02:41.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:41 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:41] ENGINE Bus STARTING 2026-03-10T07:02:41.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:41.250+0000 7faf739ba000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:02:41.711 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:41 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:41] ENGINE Serving on http://:::9283 2026-03-10T07:02:41.711 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:41 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:02:41] ENGINE Bus STARTED 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Manager daemon x is now available 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Queued rgw.foo for migration 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Queued rgw.smpl for migration 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 
10 07:02:41 vm02 ceph-mon[54377]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-10T07:02:41.938 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Checking for cert/key for grafana.a 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Deploying cephadm binary to vm05 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[54377]: Deploying cephadm binary to vm02 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:41.679+0000 7faf739ba000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:41.866+0000 7faf739ba000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Manager daemon x is now available 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Queued rgw.foo for migration 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 ' 
entity='mgr.x' 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Queued rgw.smpl for migration 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T07:02:41.939 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Checking for cert/key for grafana.a 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Deploying cephadm binary to vm05 2026-03-10T07:02:41.940 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:41 vm02 ceph-mon[50158]: Deploying cephadm binary to vm02 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Manager daemon x is now available 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Queued rgw.foo for migration 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: 
from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Queued rgw.smpl for migration 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-10T07:02:42.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Checking for cert/key for grafana.a 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Deploying cephadm binary to vm05 2026-03-10T07:02:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:41 vm05 ceph-mon[48591]: Deploying cephadm binary to vm02 2026-03-10T07:02:42.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:41.937+0000 7faf739ba000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:02:42.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:42 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:42.138+0000 7faf739ba000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 
2026-03-10T07:02:43.038 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[54377]: mgrmap e22: x(active, since 2s) 2026-03-10T07:02:43.038 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[54377]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:43.038 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:43.038 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:43.038 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[54377]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:43.038 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:42 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:42.817+0000 7faf739ba000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:02:43.039 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:42 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:42.813Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:02:43.039 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[50158]: mgrmap e22: x(active, since 2s) 2026-03-10T07:02:43.039 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[50158]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:43.039 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:43.039 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:43.039 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:42 vm02 ceph-mon[50158]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:43.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:42 vm05 ceph-mon[48591]: mgrmap e22: x(active, since 2s) 2026-03-10T07:02:43.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:42 vm05 ceph-mon[48591]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:43.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:42 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:43.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:42 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:43.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:42 vm05 ceph-mon[48591]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:43.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.037+0000 7faf739ba000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:02:43.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.100+0000 7faf739ba000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 
2026-03-10T07:02:43.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.168+0000 7faf739ba000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:02:43.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.265+0000 7faf739ba000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:02:43.639 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.343+0000 7faf739ba000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T07:02:43.640 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:43.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:43.640 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:43.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:02:43.640 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:43.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:43.640 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=error ts=2026-03-10T07:02:43.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:43.640 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:43.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.102:8443//api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:02:43.640 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:43.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-10T07:02:43.965 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:43 vm05 ceph-mon[48591]: [10/Mar/2026:07:02:43] ENGINE Bus STARTING 2026-03-10T07:02:43.965 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:43 vm05 ceph-mon[48591]: [10/Mar/2026:07:02:43] ENGINE Serving on http://192.168.123.105:8765 2026-03-10T07:02:44.018 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:43 vm02 ceph-mon[54377]: [10/Mar/2026:07:02:43] ENGINE Bus STARTING 2026-03-10T07:02:44.018 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:43 vm02 ceph-mon[54377]: [10/Mar/2026:07:02:43] ENGINE Serving on http://192.168.123.105:8765 2026-03-10T07:02:44.018 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.739+0000 7faf739ba000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:02:44.018 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:43 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:43.861+0000 7faf739ba000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:02:44.019 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-mon[50158]: [10/Mar/2026:07:02:43] ENGINE Bus STARTING 2026-03-10T07:02:44.019 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:43 vm02 ceph-mon[50158]: [10/Mar/2026:07:02:43] ENGINE Serving on http://192.168.123.105:8765 2026-03-10T07:02:44.807 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:44 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:44.599+0000 7faf739ba000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:02:44.807 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:44 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:44.675+0000 7faf739ba000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:02:44.808 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:44 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:44.760+0000 7faf739ba000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: [10/Mar/2026:07:02:43] ENGINE Serving on https://192.168.123.105:7150 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: [10/Mar/2026:07:02:43] ENGINE Bus STARTED 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: [10/Mar/2026:07:02:43] ENGINE Client ('192.168.123.105', 44590) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: mgrmap e23: x(active, since 4s) 2026-03-10T07:02:45.065 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:44 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:44.898+0000 7faf739ba000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:44 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:44.956+0000 7faf739ba000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: [10/Mar/2026:07:02:43] ENGINE Serving on https://192.168.123.105:7150 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: [10/Mar/2026:07:02:43] ENGINE Bus STARTED 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: [10/Mar/2026:07:02:43] ENGINE Client ('192.168.123.105', 44590) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: mgrmap e23: x(active, since 4s) 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 
vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.065 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.066 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:45.066 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.066 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.066 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:45.066 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:44 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: [10/Mar/2026:07:02:43] ENGINE Serving on https://192.168.123.105:7150 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: [10/Mar/2026:07:02:43] ENGINE Bus STARTED 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: [10/Mar/2026:07:02:43] ENGINE Client ('192.168.123.105', 44590) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: mgrmap e23: x(active, since 4s) 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:45.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:44 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": 
"osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:45.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:45.063+0000 7faf739ba000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:02:45.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:45.158+0000 7faf739ba000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:02:45.764 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:45.498+0000 7faf739ba000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:45.564+0000 7faf739ba000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:07:02:45] ENGINE Bus STARTING 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: CherryPy Checker: 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: The Application mounted at '' has an empty config. 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:07:02:45] ENGINE Serving on http://:::9283 2026-03-10T07:02:45.765 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: [10/Mar/2026:07:02:45] ENGINE Bus STARTED 2026-03-10T07:02:46.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:45.764Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:46] "GET /metrics HTTP/1.1" 200 34774 "" "Prometheus/2.33.4" 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: Standby manager daemon y started 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:46 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: Standby manager daemon y started 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.? 
192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: Standby manager daemon y started 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.102:0/3678840571' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:46.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:02:46.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.837 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:46 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:46.837 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:46 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=warn ts=2026-03-10T07:02:46.622Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=5 err="Post \"https://192.168.123.102:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.102 because it doesn't contain any IP SANs" 2026-03-10T07:02:47.116 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:46 vm02 systemd[1]: Stopping Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:02:47.116 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 podman[81081]: 2026-03-10 07:02:47.032188986 +0000 UTC m=+0.018442381 container died 79b6fdafea6ad96214035972a09354bc27a3d556bc56cc058156a03f7f9f6ce4 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:47.116 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 podman[81081]: 2026-03-10 07:02:47.054833141 +0000 UTC m=+0.041086525 container remove 79b6fdafea6ad96214035972a09354bc27a3d556bc56cc058156a03f7f9f6ce4 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:47.116 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 bash[81081]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a 2026-03-10T07:02:47.116 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:02:47.424 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-10T07:02:47.425 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 systemd[1]: Stopped Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:02:47.425 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service: Consumed 1.148s CPU time. 2026-03-10T07:02:47.425 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 systemd[1]: Starting Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:02:47.784 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: mgrmap e24: x(active, since 6s), standbys: y 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:02:47.785 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating 
vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: mgrmap e24: x(active, since 6s), standbys: y 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.786 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:47 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:47.786 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:47 vm02 bash[81193]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-10T07:02:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:02:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:02:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: mgrmap e24: x(active, since 6s), standbys: y 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: 
from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:47 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:48.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:47] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:02:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:48 vm02 ceph-mon[54377]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-10T07:02:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:48 vm02 ceph-mon[54377]: Deploying daemon node-exporter.a on vm02 2026-03-10T07:02:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:48 vm02 ceph-mon[54377]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:48.835 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:48 vm02 bash[81193]: Getting image source signatures 2026-03-10T07:02:48.835 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:48 vm02 bash[81193]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-10T07:02:48.835 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:48 vm02 bash[81193]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-10T07:02:48.835 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:48 vm02 bash[81193]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-10T07:02:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:48 vm02 ceph-mon[50158]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-10T07:02:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:48 vm02 ceph-mon[50158]: Deploying daemon node-exporter.a on vm02 2026-03-10T07:02:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:48 vm02 ceph-mon[50158]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:48 vm05 ceph-mon[48591]: Reconfiguring node-exporter.a (dependencies changed)... 
2026-03-10T07:02:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:48 vm05 ceph-mon[48591]: Deploying daemon node-exporter.a on vm02 2026-03-10T07:02:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:48 vm05 ceph-mon[48591]: from='client.14784 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 bash[81193]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 bash[81193]: Writing manifest to image destination 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 podman[81193]: 2026-03-10 07:02:49.313873948 +0000 UTC m=+1.900224418 container create 0bc0b34c732af84a19cf90445e30e5b59051965f4df530eadc55f6ab2d4ff271 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 podman[81193]: 2026-03-10 07:02:49.307083288 +0000 UTC m=+1.893433767 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 podman[81193]: 2026-03-10 07:02:49.34428533 +0000 UTC m=+1.930635810 container init 0bc0b34c732af84a19cf90445e30e5b59051965f4df530eadc55f6ab2d4ff271 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 podman[81193]: 2026-03-10 07:02:49.34689256 +0000 UTC m=+1.933243030 container start 0bc0b34c732af84a19cf90445e30e5b59051965f4df530eadc55f6ab2d4ff271 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 bash[81193]: 0bc0b34c732af84a19cf90445e30e5b59051965f4df530eadc55f6ab2d4ff271 2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 systemd[1]: Started Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:02:49.585 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.357Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.357Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=arp 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-10T07:02:49.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-mon[50158]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:02:49.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:49.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-mon[50158]: from='mgr.24776 ' 
entity='mgr.x' 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-10T07:02:49.586 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=edac 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=os 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info 
collector=schedstat 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=stat 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=time 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=uname 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-10T07:02:49.587 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:02:49 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a[81250]: ts=2026-03-10T07:02:49.358Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-10T07:02:49.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:49 vm02 ceph-mon[54377]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:02:49.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:49 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:49.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:49 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:50.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:49 vm05 ceph-mon[48591]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:02:50.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:49 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:50.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:49 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:51.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:50 vm05 ceph-mon[48591]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T07:02:51.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:50 vm05 ceph-mon[48591]: Deploying daemon alertmanager.a on vm02 2026-03-10T07:02:51.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:50 vm02 ceph-mon[54377]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T07:02:51.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:50 vm02 ceph-mon[54377]: Deploying daemon alertmanager.a on vm02 2026-03-10T07:02:51.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:50 vm02 ceph-mon[50158]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-10T07:02:51.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:50 vm02 ceph-mon[50158]: Deploying daemon alertmanager.a on vm02 2026-03-10T07:02:51.986 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:51 vm02 ceph-mon[54377]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T07:02:51.987 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:51 vm02 ceph-mon[50158]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T07:02:52.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:51 vm05 ceph-mon[48591]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 systemd[1]: Stopping Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[76934]: level=info ts=2026-03-10T07:02:52.442Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 
2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81516]: 2026-03-10 07:02:52.454461852 +0000 UTC m=+0.028042870 container died 519616b4f7a0841745aeb5c02799efc9406a4e4cf1a8e913488325b523e9cf74 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81516]: 2026-03-10 07:02:52.469728026 +0000 UTC m=+0.043309045 container remove 519616b4f7a0841745aeb5c02799efc9406a4e4cf1a8e913488325b523e9cf74 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81516]: 2026-03-10 07:02:52.470756371 +0000 UTC m=+0.044337389 volume remove 187dad9c09a9a22c5de6d79f934fdcc1defa9dbaac404cae84527bf63abb6672 2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 bash[81516]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a 2026-03-10T07:02:52.629 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@alertmanager.a.service: Deactivated successfully. 2026-03-10T07:02:52.630 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 systemd[1]: Stopped Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 systemd[1]: Starting Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81626]: 2026-03-10 07:02:52.840399287 +0000 UTC m=+0.017263264 volume create dc325bb8f832a7ea520371bd784b9c962a26d99997a0dbd3c5e6e503d0e1ef8c 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81626]: 2026-03-10 07:02:52.84400128 +0000 UTC m=+0.020865267 container create 520cbcc5ad9835ee8cbaf7318bda577029ffd2a5694a8cff9072c845b97e28f0 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81626]: 2026-03-10 07:02:52.872054187 +0000 UTC m=+0.048918184 container init 520cbcc5ad9835ee8cbaf7318bda577029ffd2a5694a8cff9072c845b97e28f0 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81626]: 2026-03-10 07:02:52.874181719 +0000 UTC m=+0.051045706 container start 520cbcc5ad9835ee8cbaf7318bda577029ffd2a5694a8cff9072c845b97e28f0 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 bash[81626]: 520cbcc5ad9835ee8cbaf7318bda577029ffd2a5694a8cff9072c845b97e28f0 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 podman[81626]: 2026-03-10 07:02:52.834248585 +0000 UTC m=+0.011112582 image pull 
c8568f914cd25b2062c44e9f79f9c18da6e3b85fe0c47a12a2191c61426c2b19 quay.io/prometheus/alertmanager:v0.25.0 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 systemd[1]: Started Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.893Z caller=main.go:240 level=info msg="Starting Alertmanager" version="(version=0.25.0, branch=HEAD, revision=258fab7cdd551f2cf251ed0348f0ad7289aee789)" 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.893Z caller=main.go:241 level=info build_context="(go=go1.19.4, user=root@abe866dd5717, date=20221222-14:51:36)" 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.894Z caller=cluster.go:185 level=info component=cluster msg="setting advertise address explicitly" addr=192.168.123.102 port=9094 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.896Z caller=cluster.go:681 level=info component=cluster msg="Waiting for gossip to settle..." interval=2s 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.919Z caller=coordinator.go:113 level=info component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.919Z caller=coordinator.go:126 level=info component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.920Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9093 2026-03-10T07:02:53.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:52.920Z caller=tls_config.go:235 level=info msg="TLS is disabled." 
http2=false address=[::]:9093 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[54377]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[50158]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:02:53.773 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:53 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:53.905 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:53 vm05 ceph-mon[48591]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:02:53.905 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:53 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:53.905 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:53 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:53.905 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:53 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:02:53.905 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:53 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:02:53.905 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:53 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:02:54.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 systemd[1]: Stopping Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:02:54.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 podman[70877]: 2026-03-10 07:02:54.091051194 +0000 UTC m=+0.017375323 container died 4912deba3ea04b6574b25f06d3c8c5646518dcaeb67f5ecd6ac107b510c7c9e3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:02:54.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 podman[70877]: 2026-03-10 07:02:54.120740018 +0000 UTC m=+0.047064147 container remove 4912deba3ea04b6574b25f06d3c8c5646518dcaeb67f5ecd6ac107b510c7c9e3 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:02:54.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 bash[70877]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b 2026-03-10T07:02:54.254 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:02:54.254 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-10T07:02:54.254 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 systemd[1]: Stopped Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:02:54.254 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service: Consumed 1.171s CPU time. 2026-03-10T07:02:54.602 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 systemd[1]: Starting Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:02:54.602 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:54 vm05 bash[70987]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 
2026-03-10T07:02:54.896 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[54377]: Reconfiguring iscsi.foo.vm02.iphfbm (dependencies changed)... 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[54377]: Reconfiguring daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3931259428' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2781047516' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/596000248"}]: dispatch 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[50158]: Reconfiguring iscsi.foo.vm02.iphfbm (dependencies changed)... 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[50158]: Reconfiguring daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3931259428' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:02:54.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2781047516' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/596000248"}]: dispatch 2026-03-10T07:02:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:54 vm05 ceph-mon[48591]: Reconfiguring iscsi.foo.vm02.iphfbm (dependencies changed)... 2026-03-10T07:02:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:54 vm05 ceph-mon[48591]: Reconfiguring daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:02:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:54 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:54 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:54 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3931259428' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:02:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:54 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/2781047516' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/596000248"}]: dispatch 2026-03-10T07:02:55.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:02:54 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:02:54.896Z caller=cluster.go:706 level=info component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000636151s 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: Deploying daemon node-exporter.b on vm05 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2781047516' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/596000248"}]': finished 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3400503803' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]: dispatch 2026-03-10T07:02:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:55 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]: dispatch 2026-03-10T07:02:56.003 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:55 vm05 bash[70987]: Getting image source signatures 2026-03-10T07:02:56.003 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:55 vm05 bash[70987]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-10T07:02:56.004 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:55 vm05 bash[70987]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-10T07:02:56.004 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:55 vm05 bash[70987]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: Deploying daemon node-exporter.b on vm05 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/2781047516' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/596000248"}]': finished 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/3400503803' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]: dispatch 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]: dispatch 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: Deploying daemon node-exporter.b on vm05 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2781047516' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/596000248"}]': finished 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: osdmap e78: 8 total, 8 up, 8 in 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/3400503803' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]: dispatch 2026-03-10T07:02:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:55 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]: dispatch 2026-03-10T07:02:56.654 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:56] "GET /metrics HTTP/1.1" 200 37763 "" "Prometheus/2.33.4" 2026-03-10T07:02:56.654 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]': finished 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 bash[70987]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 bash[70987]: Writing manifest to image destination 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 podman[70987]: 2026-03-10 07:02:56.388003656 +0000 UTC m=+1.902852391 container create 0129ed456f9d5feae6aeb1883606933102402884ed39fbb418ae0bf34c0ffc52 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 podman[70987]: 2026-03-10 07:02:56.381635395 +0000 UTC m=+1.896484130 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 podman[70987]: 2026-03-10 07:02:56.413107815 +0000 UTC m=+1.927956560 container init 0129ed456f9d5feae6aeb1883606933102402884ed39fbb418ae0bf34c0ffc52 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 podman[70987]: 2026-03-10 07:02:56.415780455 +0000 UTC m=+1.930629190 container start 0129ed456f9d5feae6aeb1883606933102402884ed39fbb418ae0bf34c0ffc52 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 bash[70987]: 0129ed456f9d5feae6aeb1883606933102402884ed39fbb418ae0bf34c0ffc52 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 systemd[1]: Started Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.423Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.423Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=arp 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info 
collector=btrfs 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=edac 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.424Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=os 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info 
collector=sockstat 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=stat 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-10T07:02:56.655 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=time 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=uname 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-10T07:02:56.656 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b[71041]: ts=2026-03-10T07:02:56.425Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: osdmap e79: 8 total, 8 up, 8 in 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/4277341966' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]: dispatch 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]: dispatch 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T07:02:57.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:56 vm05 ceph-mon[48591]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]': finished 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: osdmap e79: 8 total, 8 up, 8 in 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/4277341966' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]: dispatch 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]: dispatch 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[54377]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/596000248"}]': finished 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: osdmap e79: 8 total, 8 up, 8 in 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/4277341966' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]: dispatch 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]: dispatch 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T07:02:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:56 vm02 ceph-mon[50158]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T07:02:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:57 vm05 ceph-mon[48591]: Deploying daemon prometheus.a on vm05 2026-03-10T07:02:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:57 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]': finished 2026-03-10T07:02:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:57 vm05 ceph-mon[48591]: osdmap e80: 8 total, 8 up, 8 in 2026-03-10T07:02:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:57 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/347084476' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3332446178"}]: dispatch 2026-03-10T07:02:58.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[54377]: Deploying daemon prometheus.a on vm05 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]': finished 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[54377]: osdmap e80: 8 total, 8 up, 8 in 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/347084476' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3332446178"}]: dispatch 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:02:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[50367]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:02:57] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[50158]: Deploying daemon prometheus.a on vm05 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1681950952"}]': finished 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[50158]: osdmap e80: 8 total, 8 up, 8 in 2026-03-10T07:02:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:57 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/347084476' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3332446178"}]: dispatch 2026-03-10T07:02:59.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:58 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/347084476' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3332446178"}]': finished 2026-03-10T07:02:59.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:58 vm05 ceph-mon[48591]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T07:02:59.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:58 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/508984799' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]: dispatch 2026-03-10T07:02:59.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:58 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]: dispatch 2026-03-10T07:02:59.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:58 vm05 ceph-mon[48591]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/347084476' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3332446178"}]': finished 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[54377]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/508984799' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]: dispatch 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[54377]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]: dispatch 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[54377]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/347084476' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/3332446178"}]': finished 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[50158]: osdmap e81: 8 total, 8 up, 8 in 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/508984799' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]: dispatch 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]: dispatch 2026-03-10T07:02:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:58 vm02 ceph-mon[50158]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:00.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:59 vm05 ceph-mon[48591]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]': finished 2026-03-10T07:03:00.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:59 vm05 ceph-mon[48591]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T07:03:00.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:02:59 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/769700225' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2765940575"}]: dispatch 2026-03-10T07:03:00.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:59 vm02 ceph-mon[54377]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]': finished 2026-03-10T07:03:00.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:59 vm02 ceph-mon[54377]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T07:03:00.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:02:59 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/769700225' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2765940575"}]: dispatch 2026-03-10T07:03:00.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:59 vm02 ceph-mon[50158]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2782992750"}]': finished 2026-03-10T07:03:00.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:59 vm02 ceph-mon[50158]: osdmap e82: 8 total, 8 up, 8 in 2026-03-10T07:03:00.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:02:59 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/769700225' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2765940575"}]: dispatch 2026-03-10T07:03:01.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:00 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/769700225' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2765940575"}]': finished 2026-03-10T07:03:01.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:00 vm05 ceph-mon[48591]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T07:03:01.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:00 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2067606909' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2121258844"}]: dispatch 2026-03-10T07:03:01.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:00 vm05 ceph-mon[48591]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/769700225' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2765940575"}]': finished 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[50158]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2067606909' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2121258844"}]: dispatch 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[50158]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/769700225' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2765940575"}]': finished 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[54377]: osdmap e83: 8 total, 8 up, 8 in 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/2067606909' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2121258844"}]: dispatch 2026-03-10T07:03:01.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:00 vm02 ceph-mon[54377]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:01.250 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 starting - - - - 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (4m) 17s ago 4m 53.2M - dad864ee21e9 81ae9caed32e 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 starting - - - - 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283 running (26s) 17s ago 6m 531M - 19.2.3-678-ge911bdeb 654f31e6858e 1fbb0bd98b14 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:9283 running (6m) 17s ago 6m 202M - 17.2.0 e1d6a67b021e 1e8c9dfeac74 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (6m) 17s ago 6m 53.8M 2048M 17.2.0 e1d6a67b021e 16a124fbc55b 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (6m) 17s ago 6m 41.5M 2048M 17.2.0 e1d6a67b021e c223c571b7ce 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (6m) 17s ago 6m 43.7M 2048M 17.2.0 e1d6a67b021e d42e67599bdc 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 starting - - - - 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 starting - - - - 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (5m) 17s ago 5m 50.1M 4096M 17.2.0 e1d6a67b021e 0e972212a251 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (5m) 17s ago 5m 55.0M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (5m) 17s ago 5m 48.8M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (5m) 17s ago 5m 49.4M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (5m) 17s ago 5m 51.7M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (5m) 17s ago 5m 49.4M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (5m) 17s ago 5m 51.8M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (4m) 17s ago 4m 51.3M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (4m) 17s ago 4m 58.8M - 2.33.4 514e6a882f6e 354a9ecb0815 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (4m) 17s ago 4m 92.3M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (4m) 17s 
ago 4m 92.2M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (4m) 17s ago 4m 90.2M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:03:01.668 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (4m) 17s ago 4m 92.4M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 systemd[1]: Stopping Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.485Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.485Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.485Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.485Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.485Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.485Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.486Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.486Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.486Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.487Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 
2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.487Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[66988]: ts=2026-03-10T07:03:01.487Z caller=main.go:1066 level=info msg="See you next time!" 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 podman[71392]: 2026-03-10 07:03:01.496763105 +0000 UTC m=+0.026895515 container died 354a9ecb0815a137ed2d00f9321e921ea5632003e6d2d9ad2c230c60a8429c7a (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 podman[71392]: 2026-03-10 07:03:01.513515949 +0000 UTC m=+0.043648359 container remove 354a9ecb0815a137ed2d00f9321e921ea5632003e6d2d9ad2c230c60a8429c7a (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 bash[71392]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a.service: Deactivated successfully. 2026-03-10T07:03:01.692 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 systemd[1]: Stopped Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1, 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "mds": {}, 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:03:01.916 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:01.917 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:03:01.917 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 16, 2026-03-10T07:03:01.917 
INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-10T07:03:01.917 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:03:01.917 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:03:01.968 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:01 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2067606909' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2121258844"}]': finished 2026-03-10T07:03:01.968 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:01 vm05 ceph-mon[48591]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T07:03:01.968 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:01 vm05 ceph-mon[48591]: from='client.24979 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:01.968 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:01 vm05 ceph-mon[48591]: from='client.24985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 systemd[1]: Starting Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 podman[71503]: 2026-03-10 07:03:01.900189883 +0000 UTC m=+0.019476761 container create 3d2bddb87dc57c0be0eb81eaf4354abb441d4305a65a8e5590bf72d990dd7366 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 podman[71503]: 2026-03-10 07:03:01.928684471 +0000 UTC m=+0.047971349 container init 3d2bddb87dc57c0be0eb81eaf4354abb441d4305a65a8e5590bf72d990dd7366 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 podman[71503]: 2026-03-10 07:03:01.93143631 +0000 UTC m=+0.050723188 container start 3d2bddb87dc57c0be0eb81eaf4354abb441d4305a65a8e5590bf72d990dd7366 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 bash[71503]: 3d2bddb87dc57c0be0eb81eaf4354abb441d4305a65a8e5590bf72d990dd7366 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 podman[71503]: 2026-03-10 07:03:01.892468663 +0000 UTC m=+0.011755551 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 systemd[1]: Started Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.958Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.958Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.958Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm05 (none))" 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.958Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.958Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.960Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.962Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.966Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T07:03:01.968 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.966Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095 2026-03-10T07:03:02.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[54377]: from='client.? 
192.168.123.102:0/2067606909' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2121258844"}]': finished 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[54377]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[54377]: from='client.24979 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[54377]: from='client.24985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2067606909' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/2121258844"}]': finished 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[50158]: osdmap e84: 8 total, 8 up, 8 in 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[50158]: from='client.24979 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:02.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:01 vm02 ceph-mon[50158]: from='client.24985 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [], 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "1/23 daemons upgraded", 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "message": "", 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false 2026-03-10T07:03:02.194 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.967Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.967Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.303µs 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.967Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:01.998Z caller=head.go:778 level=info 
component=tsdb msg="WAL segment loaded" segment=0 maxSegment=2 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.009Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=2 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.010Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=2 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.010Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=16.882µs wal_replay_duration=42.999934ms wbl_replay_duration=120ns total_replay_duration=43.033306ms 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.013Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.013Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.013Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.036Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=23.095293ms db_storage=731ns remote_storage=941ns web_handler=270ns query_engine=730ns scrape=8.841085ms scrape_sd=112.131µs notify=7.935µs notify_sd=5.511µs rules=13.846564ms tracing=7.073µs 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.036Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T07:03:02.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:02.036Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-10T07:03:02.437 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:03:02.585 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 systemd[1]: Stopping Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
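Alongside the monitoring redeploy (the Prometheus v2.33.4 container was removed and a v2.51.0 container started in its place, now replaying its WAL above), the orchestrator reports `in_progress: true`, `1/23 daemons upgraded` and an empty `message`, and `ceph health detail` still returns HEALTH_OK. A one-shot read of the same fields, as a sketch using the field names from the JSON above:

    # progress string and error message, if any, from the orchestrator's upgrade state
    ceph orch upgrade status | jq -r '.progress, .message'
    # exits non-zero once in_progress flips to false, i.e. when the upgrade has finished
    ceph orch upgrade status | jq -e '.in_progress'
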
2026-03-10T07:03:02.585 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[63692]: t=2026-03-10T07:03:02+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-10T07:03:02.585 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 podman[71641]: 2026-03-10 07:03:02.544838042 +0000 UTC m=+0.032052456 container died 81ae9caed32eb596b37a84c79ce414d6d3f8e2f9c7a953e9f5c31e985596779b (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, build-date=2022-03-28T10:36:18.413762, release=236.1648460182, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Grafana Container configured for Ceph mgr/dashboard integration, io.buildah.version=1.24.2, io.openshift.tags=base rhel8, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 8, io.openshift.expose-services=, maintainer=Paul Cuzner , description=Ceph Grafana Container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, architecture=x86_64, com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, version=8.5, name=ubi8, vcs-type=git) 2026-03-10T07:03:02.586 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 podman[71641]: 2026-03-10 07:03:02.565851679 +0000 UTC m=+0.053066093 container remove 81ae9caed32eb596b37a84c79ce414d6d3f8e2f9c7a953e9f5c31e985596779b (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, architecture=x86_64, build-date=2022-03-28T10:36:18.413762, description=Ceph Grafana Container, summary=Grafana Container configured for Ceph mgr/dashboard integration, vcs-type=git, io.buildah.version=1.24.2, maintainer=Paul Cuzner , vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=ubi8-container, io.k8s.display-name=Red Hat Universal Base Image 8, name=ubi8, distribution-scope=public, version=8.5, release=236.1648460182, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel8, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.expose-services=) 2026-03-10T07:03:02.586 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 bash[71641]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a 2026-03-10T07:03:02.849 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='client.24991 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2352776564' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: Reconfiguring grafana.a (dependencies changed)... 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: Reconfiguring daemon grafana.a on vm05 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='client.25003 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/1324146423' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:02 vm05 ceph-mon[48591]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T07:03:02.850 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:02] ENGINE Bus STOPPING 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 bash[71659]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana.a" found: no such container 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@grafana.a.service: Deactivated successfully. 
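grafana.a is reconfigured rather than upgraded: cephadm logs "dependencies changed" (presumably because the Prometheus endpoint it scrapes was just redeployed), regenerates its self-signed TLS certificates, dispatches `dashboard set-grafana-api-ssl-verify false`, and then stops and restarts the same quay.io/ceph/ceph-grafana:8.3.5 container with the new configuration. The resulting dashboard settings can be read back with the `get-*` counterparts of the commands visible in the mon log; as a sketch (assuming the dashboard module exposes the matching getters, as it does for the other settings dispatched here):

    # where the dashboard module currently points for Grafana, and whether it verifies its TLS cert
    ceph dashboard get-grafana-api-url
    ceph dashboard get-grafana-api-ssl-verify
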
2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 systemd[1]: Stopped Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@grafana.a.service: Consumed 1.378s CPU time. 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 systemd[1]: Starting Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 podman[71699]: 2026-03-10 07:03:02.700342066 +0000 UTC m=+0.018359100 container create 34567dcb4b514d0f8c2217a5f2563d5e13ea893b75b75582d24a34a0eebc29b9 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, com.redhat.component=ubi8-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel8, vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Grafana Container configured for Ceph mgr/dashboard integration, description=Ceph Grafana Container, build-date=2022-03-28T10:36:18.413762, version=8.5, io.openshift.expose-services=, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.24.2, release=236.1648460182, maintainer=Paul Cuzner , vendor=Red Hat, Inc., vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, name=ubi8, io.k8s.display-name=Red Hat Universal Base Image 8, architecture=x86_64) 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 podman[71699]: 2026-03-10 07:03:02.73771518 +0000 UTC m=+0.055732203 container init 34567dcb4b514d0f8c2217a5f2563d5e13ea893b75b75582d24a34a0eebc29b9 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., description=Ceph Grafana Container, distribution-scope=public, architecture=x86_64, com.redhat.component=ubi8-container, name=ubi8, build-date=2022-03-28T10:36:18.413762, version=8.5, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.24.2, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, maintainer=Paul Cuzner , vendor=Red Hat, Inc., io.openshift.expose-services=, io.openshift.tags=base rhel8) 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 podman[71699]: 2026-03-10 07:03:02.746601079 +0000 UTC m=+0.064618113 container start 34567dcb4b514d0f8c2217a5f2563d5e13ea893b75b75582d24a34a0eebc29b9 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, description=Ceph Grafana Container, version=8.5, distribution-scope=public, vcs-type=git, io.buildah.version=1.24.2, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., build-date=2022-03-28T10:36:18.413762, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.openshift.tags=base rhel8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Paul Cuzner , vendor=Red Hat, Inc., com.redhat.component=ubi8-container, name=ubi8, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.openshift.expose-services=, architecture=x86_64) 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 bash[71699]: 34567dcb4b514d0f8c2217a5f2563d5e13ea893b75b75582d24a34a0eebc29b9 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 podman[71699]: 2026-03-10 07:03:02.693414882 +0000 UTC m=+0.011431916 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 systemd[1]: Started Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
logger=settings 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-10T07:03:02.850 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="App mode production" logger=settings 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: 
t=2026-03-10T07:03:02+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="migrations completed" logger=migrator performed=0 skipped=377 duration=501.439µs 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-10T07:03:02.851 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='client.24991 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/2352776564' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: Reconfiguring grafana.a (dependencies changed)... 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: Reconfiguring daemon grafana.a on vm05 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='client.25003 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:03.073 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1324146423' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:03:03.074 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[54377]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T07:03:03.077 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:03:02.899Z caller=cluster.go:698 level=info component=cluster msg="gossip settled; proceeding" elapsed=10.003433108s 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='client.24991 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/2352776564' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: Reconfiguring grafana.a (dependencies changed)... 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: Reconfiguring daemon grafana.a on vm05 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='client.25003 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: from='client.? 
192.168.123.102:0/1324146423' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:03:03.077 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:02 vm02 ceph-mon[50158]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T07:03:03.101 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:02] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:03:03.101 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:02] ENGINE Bus STOPPED 2026-03-10T07:03:03.101 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:02] ENGINE Bus STARTING 2026-03-10T07:03:03.101 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Serving on http://:::9283 2026-03-10T07:03:03.101 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STARTED 2026-03-10T07:03:03.101 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STOPPING 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="deleted datasource based on configuration" logger=provisioning.datasources name=Dashboard1 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Loki uid=P8E80F9AEF21F6940 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-10T07:03:03.102 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:03:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:03:02+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STOPPED 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STARTING 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Serving on http://:::9283 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STARTED 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STOPPING 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STOPPED 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STARTING 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Serving on http://:::9283 2026-03-10T07:03:03.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:03] ENGINE Bus STARTED 2026-03-10T07:03:03.752 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 systemd[1]: Stopping Ceph mgr.y for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: Upgrade: Updating mgr.y 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:04.040 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[50158]: Deploying daemon mgr.y on vm02 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 podman[82493]: 2026-03-10 07:03:03.752319707 +0000 UTC m=+0.047667422 container died 1e8c9dfeac748e101546921b947d19bc51bea042ddd2973e5ad1320be80302ae (image=quay.io/ceph/ceph:v17.2.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y, io.openshift.expose-services=, 
maintainer=Guillaume Abrioux , description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, vcs-type=git, io.k8s.display-name=CentOS Stream 8, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, version=8, RELEASE=HEAD, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, CEPH_POINT_RELEASE=-17.2.0, ceph=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public) 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 podman[82493]: 2026-03-10 07:03:03.775619328 +0000 UTC m=+0.070967052 container remove 1e8c9dfeac748e101546921b947d19bc51bea042ddd2973e5ad1320be80302ae (image=quay.io/ceph/ceph:v17.2.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y, io.openshift.expose-services=, GIT_BRANCH=HEAD, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.k8s.display-name=CentOS Stream 8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, version=8, name=centos-stream, vcs-type=git, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.buildah.version=1.19.8, CEPH_POINT_RELEASE=-17.2.0, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64) 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 bash[82493]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service: Failed with result 'exit-code'. 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 systemd[1]: Stopped Ceph mgr.y for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:03 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service: Consumed 32.287s CPU time. 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: Upgrade: Updating mgr.y 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:04.041 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:03 vm02 ceph-mon[54377]: Deploying daemon mgr.y on vm02 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-10T07:03:04.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: Upgrade: Updating mgr.y 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:04.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:03 vm05 ceph-mon[48591]: Deploying daemon mgr.y on vm02 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 systemd[1]: Starting Ceph mgr.y for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 podman[82602]: 2026-03-10 07:03:04.168208253 +0000 UTC m=+0.039166140 container create e1dc22afcf314ee0c1b06506296457fec689e5563f24878e13ed47e58d48c885 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 podman[82602]: 2026-03-10 07:03:04.224171102 +0000 UTC m=+0.095128998 container init e1dc22afcf314ee0c1b06506296457fec689e5563f24878e13ed47e58d48c885 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True) 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 podman[82602]: 2026-03-10 07:03:04.227025172 +0000 UTC m=+0.097983059 container start e1dc22afcf314ee0c1b06506296457fec689e5563f24878e13ed47e58d48c885 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 bash[82602]: e1dc22afcf314ee0c1b06506296457fec689e5563f24878e13ed47e58d48c885 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 podman[82602]: 2026-03-10 07:03:04.139852339 +0000 UTC m=+0.010810226 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 systemd[1]: Started 
Ceph mgr.y for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:04.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:04.332+0000 7feb93754140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:03:04.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:04.377+0000 7feb93754140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:03:05.000 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:04 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:04.860+0000 7feb93754140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:03:05.270 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:05.227+0000 7feb93754140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:03:05.271 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[50158]: from='client.24928 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:05.271 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:05.271 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:05.271 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[50158]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:03:05.556 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[54377]: from='client.24928 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:05 vm02 ceph-mon[54377]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T07:03:05.557 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: from numpy import show_config as show_numpy_config 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:05.328+0000 7feb93754140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:05.369+0000 7feb93754140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:03:05.557 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:05 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:05.456+0000 7feb93754140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T07:03:05.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:05 vm05 ceph-mon[48591]: from='client.24928 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:05.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:05 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:05.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:05 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:05.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:05 vm05 ceph-mon[48591]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:03:06.174 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.046+0000 7feb93754140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:03:06.174 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.172+0000 7feb93754140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:06.529 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.217+0000 7feb93754140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T07:03:06.529 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.253+0000 7feb93754140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:06.529 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.299+0000 7feb93754140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:03:06.529 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.338+0000 7feb93754140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T07:03:06.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 
07:03:06 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:06 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:06.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.527+0000 7feb93754140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:03:06.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.584+0000 7feb93754140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:07.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:06 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:06 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:06 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:06 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.116 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:06.842+0000 7feb93754140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T07:03:07.401 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.202+0000 7feb93754140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:03:07.402 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.248+0000 7feb93754140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:03:07.402 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.304+0000 7feb93754140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:03:07.801 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:07 vm02 ceph-mon[54377]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 648 B/s rd, 0 op/s 2026-03-10T07:03:07.801 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:07 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.801 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:07 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.801 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.399+0000 7feb93754140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:03:07.801 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.438+0000 7feb93754140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 
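At this point podman has created and started the new mgr.y container from the target image (quay.ceph.io/ceph-ci/ceph:e911bde...), and the repeated "Module ... has missing NOTIFY_TYPES member" lines are emitted while its Python modules load; as the later entries show, startup continues past them and the daemon's HTTP endpoints come up. To confirm which image each mgr daemon is running mid-upgrade, something like the following would apply (a sketch; exact output format may differ by release):

  # sketch: check mgr daemon images and per-daemon versions during the upgrade
  ceph orch ps --daemon-type mgr   # lists mgr.x / mgr.y with the container image each runs
  ceph versions                    # per-daemon version map; converges to a single entry when done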
2026-03-10T07:03:07.801 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.525+0000 7feb93754140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:03:07.801 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.649+0000 7feb93754140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:07.802 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:07 vm02 ceph-mon[50158]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 648 B/s rd, 0 op/s 2026-03-10T07:03:07.802 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:07 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:07.802 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:07 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:07 vm05 ceph-mon[48591]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 648 B/s rd, 0 op/s 2026-03-10T07:03:08.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:07 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:07 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.799+0000 7feb93754140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:07.842+0000 7feb93754140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:07] ENGINE Bus STARTING 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: CherryPy Checker: 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: The Application mounted at '' has an empty config. 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:07] ENGINE Serving on http://:::9283 2026-03-10T07:03:08.060 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:07] ENGINE Bus STARTED 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: Standby manager daemon y restarted 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: Standby manager daemon y started 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[54377]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T07:03:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: Standby manager daemon y restarted 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: Standby manager daemon y started 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.? 
192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:08 vm02 ceph-mon[50158]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T07:03:08.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:08] ENGINE Bus STOPPING 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: Standby manager daemon y restarted 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: Standby manager daemon y started 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 192.168.123.105:0/3657220142' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:08 vm05 ceph-mon[48591]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: ignoring --setuser ceph since I am not root 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: ignoring --setgroup ceph since I am not root 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:08.708+0000 7feb1fb59140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:03:09.004 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:08.753+0000 7feb1fb59140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:03:09.256 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:08] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:03:09.256 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:08] ENGINE Bus STOPPED 2026-03-10T07:03:09.256 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:09 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:09] ENGINE Bus STARTING 2026-03-10T07:03:09.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:09.190+0000 7feb1fb59140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:09 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:09] ENGINE Serving on http://:::9283 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:09 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:09] ENGINE Bus STARTED 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.24776 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: mgrmap e25: y(active, starting, since 0.388229s) 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 
192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: Manager daemon y is now available 2026-03-10T07:03:09.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:09.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:09.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:09.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:09.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:09.538+0000 7feb1fb59140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
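Once the upgraded mgr.y registers as a standby, mgr.x hands over by dispatching "mgr fail x" (both the dispatch and its "finished" acknowledgement appear above), mgr.y becomes the active manager ("Manager daemon y is now available"), and mgr.x restarts so it can in turn be redeployed on the new image. A sketch of how this failover step could be reproduced or observed by hand, assuming the daemon names from this run:

  # sketch: fail over to the already-upgraded standby, then watch it take over
  ceph mgr fail x            # same command the orchestrator dispatches in the log above
  ceph mgr stat              # active/standby view; 'y' should now report as active
  ceph orch upgrade status   # overall upgrade progress after the mgr handoff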
2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: from numpy import show_config as show_numpy_config 2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:09.655+0000 7feb1fb59140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:09.696+0000 7feb1fb59140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:03:09.802 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:09.769+0000 7feb1fb59140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.24776 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: mgrmap e25: y(active, starting, since 0.388229s) 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 
192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: Manager daemon y is now available 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:09.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:09 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.24776 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: mgrmap e25: y(active, starting, since 0.388229s) 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:03:09.835 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: Manager daemon y is now available 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:09.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:09 vm02 ceph-mon[54377]: from='mgr.25015 ' 
entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:10.434 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.311+0000 7feb1fb59140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:03:10.434 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.431+0000 7feb1fb59140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:10.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:10 vm05 ceph-mon[48591]: mgrmap e26: y(active, since 1.41559s) 2026-03-10T07:03:10.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:10 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:10] ENGINE Bus STARTING 2026-03-10T07:03:10.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.474+0000 7feb1fb59140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T07:03:10.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.510+0000 7feb1fb59140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:10.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.569+0000 7feb1fb59140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:03:10.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.630+0000 7feb1fb59140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T07:03:10.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:10 vm02 ceph-mon[50158]: mgrmap e26: y(active, since 1.41559s) 2026-03-10T07:03:10.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:10 vm02 ceph-mon[50158]: [10/Mar/2026:07:03:10] ENGINE Bus STARTING 2026-03-10T07:03:10.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:10 vm02 ceph-mon[54377]: mgrmap e26: y(active, since 1.41559s) 2026-03-10T07:03:10.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:10 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:10] ENGINE Bus STARTING 2026-03-10T07:03:11.166 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.828+0000 7feb1fb59140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:03:11.166 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:10.885+0000 7feb1fb59140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:11.166 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.163+0000 7feb1fb59140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T07:03:11.476 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.473+0000 7feb1fb59140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:03:11.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.519+0000 
7feb1fb59140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:03:11.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.565+0000 7feb1fb59140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:03:11.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.651+0000 7feb1fb59140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:03:11.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.690+0000 7feb1fb59140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:10] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:10] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:10] ENGINE Bus STARTED 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:10] ENGINE Client ('192.168.123.102', 59242) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: mgrmap e27: y(active, since 2s) 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:11 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:10] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:10] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:10] ENGINE Bus STARTED 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:10] ENGINE Client ('192.168.123.102', 59242) lost — peer dropped the TLS connection suddenly, during handshake: 
(6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: mgrmap e27: y(active, since 2s) 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.873 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: [10/Mar/2026:07:03:10] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: [10/Mar/2026:07:03:10] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: [10/Mar/2026:07:03:10] ENGINE Bus STARTED 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: [10/Mar/2026:07:03:10] ENGINE Client ('192.168.123.102', 59242) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: mgrmap e27: y(active, since 2s) 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:11.874 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:11 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:12.038 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.769+0000 7feb1fb59140 -1 mgr[py] 
Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:03:12.038 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:11.888+0000 7feb1fb59140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:12.036+0000 7feb1fb59140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:12.076+0000 7feb1fb59140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:12] ENGINE Bus STARTING 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: CherryPy Checker: 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: The Application mounted at '' has an empty config. 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: 2026-03-10T07:03:12.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:12] ENGINE Serving on http://:::9283 2026-03-10T07:03:12.504 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:12 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[68264]: [10/Mar/2026:07:03:12] ENGINE Bus STARTED 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: Standby manager daemon x started 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:03:13.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:12 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: Standby manager daemon x started 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[50158]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: Standby manager daemon x started 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:03:13.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:12 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.105:0/2935373997' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: mgrmap e28: y(active, since 4s), standbys: x 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 
07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:03:13.880 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: mgrmap e28: y(active, since 4s), standbys: x 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: 
Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:03:13.881 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:13 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: mgrmap e28: y(active, since 4s), standbys: x 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 
2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:03:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.client.admin.keyring 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", 
"entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:03:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:13 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:14.820 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 systemd[1]: Stopping Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:15.089 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T07:03:15.089 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.819Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.820Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.822Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 
2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.822Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[71513]: ts=2026-03-10T07:03:14.823Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 podman[73536]: 2026-03-10 07:03:14.830575713 +0000 UTC m=+0.028998170 container died 3d2bddb87dc57c0be0eb81eaf4354abb441d4305a65a8e5590bf72d990dd7366 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 podman[73536]: 2026-03-10 07:03:14.846808843 +0000 UTC m=+0.045231300 container remove 3d2bddb87dc57c0be0eb81eaf4354abb441d4305a65a8e5590bf72d990dd7366 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 bash[73536]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a.service: Deactivated successfully. 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 systemd[1]: Stopped Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:14 vm05 systemd[1]: Starting Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 podman[73602]: 2026-03-10 07:03:15.026304798 +0000 UTC m=+0.021157508 container create 2f14ff887bc7d1d6dd40d0cdac3a272c9a3d688871404114c080f6cfaaeb2799 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 podman[73602]: 2026-03-10 07:03:15.054400968 +0000 UTC m=+0.049253678 container init 2f14ff887bc7d1d6dd40d0cdac3a272c9a3d688871404114c080f6cfaaeb2799 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 podman[73602]: 2026-03-10 07:03:15.057435719 +0000 UTC m=+0.052288429 container start 2f14ff887bc7d1d6dd40d0cdac3a272c9a3d688871404114c080f6cfaaeb2799 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 bash[73602]: 2f14ff887bc7d1d6dd40d0cdac3a272c9a3d688871404114c080f6cfaaeb2799 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 podman[73602]: 2026-03-10 07:03:15.017787647 +0000 UTC m=+0.012640357 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 systemd[1]: Started Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.087Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.088Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.089Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm05 (none))" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.089Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-10T07:03:15.090 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.089Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: Reconfiguring iscsi.foo.vm02.iphfbm (dependencies changed)... 
2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: Reconfiguring daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='client.? 192.168.123.102:0/1718787573' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm05.local:9095"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:15] ENGINE Bus STOPPING 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:15] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:03:15.374 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:15] ENGINE Bus STOPPED 2026-03-10T07:03:15.374 
INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:15] ENGINE Bus STARTING 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: Reconfiguring iscsi.foo.vm02.iphfbm (dependencies changed)... 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: Reconfiguring daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='client.? 192.168.123.102:0/1718787573' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm05.local:9095"}]: dispatch 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.375 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:15 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.093Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-10T07:03:15.387 
INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.094Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.096Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.096Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.162µs 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.096Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.101Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.101Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." http2=false address=[::]:9095 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.105Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=3 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.115Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=3 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.116Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=3 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.117Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=3 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.117Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=18.485µs wal_replay_duration=20.700931ms wbl_replay_duration=331ns total_replay_duration=20.733112ms 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.120Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.120Z caller=main.go:1153 level=info 
msg="TSDB started" 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.120Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.139Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=19.59398ms db_storage=1.212µs remote_storage=1.302µs web_handler=662ns query_engine=862ns scrape=960.759µs scrape_sd=218.449µs notify=11.522µs notify_sd=6.753µs rules=18.053396ms tracing=8.286µs 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.140Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-10T07:03:15.387 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:15.140Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: Reconfiguring iscsi.foo.vm02.iphfbm (dependencies changed)... 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: Reconfiguring daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='client.? 
192.168.123.102:0/1718787573' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm05.local:9095"}]: dispatch 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:15.388 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:15 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:15.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:15] ENGINE Serving on http://:::9283 2026-03-10T07:03:15.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:15] ENGINE Bus STARTED 2026-03-10T07:03:16.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:15 vm05 systemd[1]: Stopping Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: Reconfiguring daemon prometheus.a on vm05 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mon.? 
-' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm05.local:9095"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:16 vm05 ceph-mon[48591]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 podman[73856]: 2026-03-10 07:03:16.070184603 +0000 UTC m=+0.044999246 container died 1fbb0bd98b142a7bce6436903745d28ac7c2ca57c8f216b2e99f0b9863f5b87c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 podman[73856]: 2026-03-10 07:03:16.099918098 +0000 UTC m=+0.074732741 container remove 1fbb0bd98b142a7bce6436903745d28ac7c2ca57c8f216b2e99f0b9863f5b87c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 bash[73856]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Failed with result 'exit-code'. 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 systemd[1]: Stopped Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:16.339 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Consumed 11.913s CPU time. 2026-03-10T07:03:16.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: Reconfiguring daemon prometheus.a on vm05 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm05.local:9095"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[54377]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: Reconfiguring daemon prometheus.a on vm05 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: Adding iSCSI gateway http://:@192.168.123.102:5000 to Dashboard 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm02"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm05.local:9095"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:16.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:16 vm02 ceph-mon[50158]: from='mgr.25015 192.168.123.102:0/1831559897' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 systemd[1]: Starting Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 podman[73968]: 2026-03-10 07:03:16.430292641 +0000 UTC m=+0.018759921 container create cdd4c8db2d430ac47226294b8bab09acdd7eb30d260abaca17924bac5fedd82f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, ceph=True) 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 podman[73968]: 2026-03-10 07:03:16.480875415 +0000 UTC m=+0.069342695 container init cdd4c8db2d430ac47226294b8bab09acdd7eb30d260abaca17924bac5fedd82f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, OSD_FLAVOR=default) 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 podman[73968]: 2026-03-10 07:03:16.483332093 +0000 UTC m=+0.071799363 container start cdd4c8db2d430ac47226294b8bab09acdd7eb30d260abaca17924bac5fedd82f 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 bash[73968]: cdd4c8db2d430ac47226294b8bab09acdd7eb30d260abaca17924bac5fedd82f 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 podman[73968]: 2026-03-10 07:03:16.423007486 +0000 UTC m=+0.011474766 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 systemd[1]: Started Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:16.593 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:16.590+0000 7fb5e33ce140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:03:16.897 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:16.633+0000 7fb5e33ce140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:03:17.206 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:17 vm05 ceph-mon[48591]: Upgrade: Updating mgr.x 2026-03-10T07:03:17.207 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:17 vm05 ceph-mon[48591]: Deploying daemon mgr.x on vm05 2026-03-10T07:03:17.207 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:17 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:17.207 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:17 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:17.207 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:17.065+0000 7fb5e33ce140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:03:17.207 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the 
right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:17.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:17.431+0000 7fb5e33ce140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[54377]: Upgrade: Updating mgr.x 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[54377]: Deploying daemon mgr.x on vm05 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[50158]: Upgrade: Updating mgr.x 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[50158]: Deploying daemon mgr.x on vm05 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:17.508 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:17 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:17.800 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T07:03:17.801 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T07:03:17.801 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: from numpy import show_config as show_numpy_config 2026-03-10T07:03:17.801 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:17.533+0000 7fb5e33ce140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:03:17.801 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:17.581+0000 7fb5e33ce140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:03:17.801 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:17.675+0000 7fb5e33ce140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T07:03:18.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:18 vm05 ceph-mon[48591]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:03:18.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:18 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:18 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:18 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:18 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.469 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.334+0000 7fb5e33ce140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:03:18.469 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.467+0000 7fb5e33ce140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[54377]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[50158]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 
2026-03-10T07:03:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:18 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:18.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.511+0000 7fb5e33ce140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T07:03:18.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.556+0000 7fb5e33ce140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:18.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.614+0000 7fb5e33ce140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:03:18.754 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.655+0000 7fb5e33ce140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T07:03:19.092 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.830+0000 7fb5e33ce140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:03:19.092 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:18.890+0000 7fb5e33ce140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:19.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.179+0000 7fb5e33ce140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:19 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:19 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:19 vm05 ceph-mon[48591]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.511+0000 7fb5e33ce140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.551+0000 7fb5e33ce140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.598+0000 7fb5e33ce140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.688+0000 7fb5e33ce140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:03:19.773 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.729+0000 7fb5e33ce140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T07:03:19.835 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:19 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:19.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:19 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:19.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:19 vm02 ceph-mon[54377]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T07:03:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:19 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:19 vm02 ceph-mon[50158]: from='mgr.25015 ' entity='mgr.y' 2026-03-10T07:03:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:19 vm02 ceph-mon[50158]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-10T07:03:20.114 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.833+0000 7fb5e33ce140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:03:20.114 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:19.962+0000 7fb5e33ce140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:20.112+0000 7fb5e33ce140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:20.156+0000 7fb5e33ce140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: [10/Mar/2026:07:03:20] ENGINE Bus STARTING 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: CherryPy Checker: 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: The Application mounted at '' has an empty config. 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: [10/Mar/2026:07:03:20] ENGINE Serving on http://:::9283 2026-03-10T07:03:20.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: [10/Mar/2026:07:03:20] ENGINE Bus STARTED 2026-03-10T07:03:20.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 systemd[1]: Stopping Ceph mon.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a[50154]: 2026-03-10T07:03:20.797+0000 7fd21d2d4700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a[50154]: 2026-03-10T07:03:20.797+0000 7fd21d2d4700 -1 mon.a@0(leader) e3 *** Got Signal Terminated *** 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 podman[86025]: 2026-03-10 07:03:20.832412206 +0000 UTC m=+0.049111254 container died 16a124fbc55b12d938ec5e592908f406ffd1abd96c7ef7ad8693077cd4baff2c (image=quay.io/ceph/ceph:v17.2.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, ceph=True, distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.display-name=CentOS Stream 8, RELEASE=HEAD, name=centos-stream, version=8, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git) 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 podman[86025]: 2026-03-10 07:03:20.850743468 +0000 UTC m=+0.067442516 container remove 16a124fbc55b12d938ec5e592908f406ffd1abd96c7ef7ad8693077cd4baff2c (image=quay.io/ceph/ceph:v17.2.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, version=8, name=centos-stream, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.buildah.version=1.19.8, io.openshift.expose-services=, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, release=754, architecture=x86_64) 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 bash[86025]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service: Deactivated successfully. 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 systemd[1]: Stopped Ceph mon.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:20 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service: Consumed 7.604s CPU time. 2026-03-10T07:03:21.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 systemd[1]: Starting Ceph mon.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 podman[86135]: 2026-03-10 07:03:21.204974106 +0000 UTC m=+0.020187087 container create 194e84dd73c43068846817edb0954849afccb82ab0efb5e44febc668dbcbdc63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 podman[86135]: 2026-03-10 07:03:21.242953954 +0000 UTC m=+0.058166935 container init 194e84dd73c43068846817edb0954849afccb82ab0efb5e44febc668dbcbdc63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_REF=squid, 
org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 podman[86135]: 2026-03-10 07:03:21.246025013 +0000 UTC m=+0.061237994 container start 194e84dd73c43068846817edb0954849afccb82ab0efb5e44febc668dbcbdc63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223) 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 bash[86135]: 194e84dd73c43068846817edb0954849afccb82ab0efb5e44febc668dbcbdc63 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 podman[86135]: 2026-03-10 07:03:21.197061455 +0000 UTC m=+0.012274447 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 systemd[1]: Started Ceph mon.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: pidfile_write: ignore empty --pid-file 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: load: jerasure load: lrc 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: RocksDB version: 7.9.2 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Git sha 0 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: DB SUMMARY 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: DB Session ID: JJ8Z65ZSFJ3KDTEJ3V7I 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: CURRENT file: CURRENT 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: MANIFEST file: MANIFEST-000015 size: 392 Bytes 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000021.sst 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000019.log size: 5048681 ; 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.error_if_exists: 0 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.create_if_missing: 0 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.paranoid_checks: 1 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.env: 0x561464933dc0 2026-03-10T07:03:21.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.info_log: 0x561466cc65c0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_file_opening_threads: 16 
2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.statistics: (nil) 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.use_fsync: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_log_file_size: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.allow_fallocate: 1 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.use_direct_reads: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.db_log_dir: 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.wal_dir: 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.write_buffer_manager: 0x561466ccb900 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: 
Options.access_hint_on_compaction_start: 1 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T07:03:21.576 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.unordered_write: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.row_cache: None 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.wal_filter: None 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.two_write_queues: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.wal_compression: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.atomic_flush: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.log_readahead_size: 0 2026-03-10T07:03:21.577 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_background_jobs: 2 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_background_compactions: -1 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_subcompactions: 1 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T07:03:21.577 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_open_files: -1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_background_flushes: -1 2026-03-10T07:03:21.578 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Compression algorithms supported: 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kZSTD supported: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kXpressCompression supported: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kBZip2Compression supported: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kLZ4Compression supported: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kZlibCompression supported: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: kSnappyCompression supported: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.merge_operator: 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_filter: None 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x561466cc65a0) 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: cache_index_and_filter_blocks: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T07:03:21.578 
INFO:journalctl@ceph.mon.a.vm02.stdout: pin_top_level_index_and_filter: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: index_type: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: data_block_index_type: 0 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: index_shortening: 1 2026-03-10T07:03:21.578 INFO:journalctl@ceph.mon.a.vm02.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: checksum: 4 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: no_block_cache: 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_cache: 0x561466ceb350 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_cache_name: BinnedLRUCache 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_cache_options: 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: capacity : 536870912 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: num_shard_bits : 4 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: strict_capacity_limit : 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: high_pri_pool_ratio: 0.000 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_cache_compressed: (nil) 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: persistent_cache: (nil) 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_size: 4096 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_size_deviation: 10 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_restart_interval: 16 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: index_block_restart_interval: 1 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: metadata_block_size: 4096 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: partition_filters: 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: use_delta_encoding: 1 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: filter_policy: bloomfilter 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: whole_key_filtering: 1 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: verify_compression: 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: read_amp_bytes_per_bit: 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: format_version: 5 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: enable_index_compression: 1 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: block_align: 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: max_auto_readahead_size: 262144 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: prepopulate_block_cache: 0 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: initial_auto_readahead_size: 8192 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T07:03:21.579 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression: NoCompression 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.num_levels: 7 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T07:03:21.580 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T07:03:21.580 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.inplace_update_support: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.bloom_locality: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.max_successive_merges: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 
ceph-mon[86149]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.ttl: 2592000 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.enable_blob_files: false 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.min_blob_size: 0 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T07:03:21.581 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 21.sst 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 23, last_sequence is 7370, log_number is 19,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 19 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4593044f-392f-414a-bc3f-f67427ad16aa 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126201281195, "job": 1, "event": "recovery_started", "wal_files": [19]} 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #19 mode 2 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126201299602, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 24, "file_size": 4364683, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 7371, "largest_seqno": 9279, "table_properties": {"data_size": 4355166, "index_size": 6236, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2245, "raw_key_size": 20996, "raw_average_key_size": 24, "raw_value_size": 4335779, "raw_average_value_size": 4977, "num_data_blocks": 292, "num_entries": 871, "num_filter_entries": 871, "num_deletions": 2, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773126201, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4593044f-392f-414a-bc3f-f67427ad16aa", "db_session_id": "JJ8Z65ZSFJ3KDTEJ3V7I", "orig_file_number": 24, "seqno_to_time_mapping": "N/A"}} 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126201299912, "job": 1, "event": "recovery_finished"} 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/version_set.cc:5047] Creating manifest 26 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000019.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x561466cece00 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: DB pointer 0x561466e02000 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: ** DB Stats ** 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: ** Compaction Stats [default] ** 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: L0 1/0 4.16 MB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 299.3 0.01 0.00 1 0.014 0 0 0.0 0.0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: L6 1/0 7.78 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Sum 2/0 11.94 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 299.3 0.01 0.00 1 0.014 0 0 0.0 0.0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 299.3 0.01 0.00 1 0.014 0 0 0.0 0.0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: ** Compaction Stats [default] ** 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 299.3 0.01 0.00 1 0.014 0 0 0.0 0.0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Flush(GB): cumulative 0.004, interval 0.004 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Cumulative compaction: 0.00 GB write, 141.74 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T07:03:21.582 INFO:journalctl@ceph.mon.a.vm02.stdout: Interval compaction: 0.00 GB write, 141.74 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: Block cache BinnedLRUCache@0x561466ceb350#2 capacity: 512.00 MB usage: 8.86 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.2e-05 secs_since: 0 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,2.28 KB,0.000435114%) IndexBlock(1,6.58 KB,0.00125468%) Misc(1,0.00 KB,0%) 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: starting mon.a rank 0 at public addrs [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] at bind addrs [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???) 
e3 preinit fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).mds e1 new map 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).mds e1 print_map 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: e1 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: legacy client fscid: -1 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout: No filesystems configured 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).osd e85 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).osd e85 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).osd e85 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).osd e85 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:21.583 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a@-1(???).paxosservice(auth 1..20) refresh upgraded, format 0 -> 3 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: Upgrade: Updating mon.a 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: Deploying daemon mon.a on vm02 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: mon.a calling monitor election 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: monmap epoch 3 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: last_changed 2026-03-10T06:56:52.872880+0000 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: min_mon_release 17 (quincy) 
2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: election_strategy: 1 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: fsmap 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: mgrmap e28: y(active, since 13s), standbys: x 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: overall HEALTH_OK 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: from='mgr.25015 ' entity='' 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[54377]: mgrmap e29: y(active, since 13s), standbys: x 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: Upgrade: Updating mon.a 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: Deploying daemon mon.a on vm02 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a calling monitor election 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: monmap epoch 3 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: last_changed 2026-03-10T06:56:52.872880+0000 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: min_mon_release 17 (quincy) 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: election_strategy: 1 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:21.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: 
fsmap 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mgrmap e28: y(active, since 13s), standbys: x 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: overall HEALTH_OK 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: from='mgr.25015 ' entity='' 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mon[86149]: mgrmap e29: y(active, since 13s), standbys: x 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ignoring --setuser ceph since I am not root 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ignoring --setgroup ceph since I am not root 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:21 vm02 ceph-mgr[82616]: -- 192.168.123.102:0/2701350705 <== mon.1 v2:192.168.123.102:3301/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x5574974b54a0 con 0x557497493000 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:21.683+0000 7f03f4f4b140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:03:21.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:21.733+0000 7f03f4f4b140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:03:22.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:21 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: ignoring --setuser ceph since I am not root 2026-03-10T07:03:22.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:21 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: ignoring --setgroup ceph since I am not root 2026-03-10T07:03:22.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mgr[73982]: -- 192.168.123.105:0/2600569450 <== mon.1 v2:192.168.123.102:3301/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x557ccb6ff4a0 con 0x557ccb6dd000 2026-03-10T07:03:22.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:21 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:21.650+0000 7f65d3082140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-10T07:03:22.003 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:21 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:21.690+0000 7f65d3082140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: Upgrade: Updating mon.a 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: Deploying daemon mon.a on vm02 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: mon.a calling monitor election 
2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: monmap epoch 3 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: last_changed 2026-03-10T06:56:52.872880+0000 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: min_mon_release 17 (quincy) 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: election_strategy: 1 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: fsmap 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: osdmap e85: 8 total, 8 up, 8 in 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: mgrmap e28: y(active, since 13s), standbys: x 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: overall HEALTH_OK 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: from='mgr.25015 ' entity='' 2026-03-10T07:03:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:21 vm05 ceph-mon[48591]: mgrmap e29: y(active, since 13s), standbys: x 2026-03-10T07:03:22.487 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:22.124+0000 7f65d3082140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:03:22.538 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:22.188+0000 7f03f4f4b140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:22.485+0000 7f65d3082140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 
2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: from numpy import show_config as show_numpy_config 2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:22.580+0000 7f65d3082140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:22.618+0000 7f65d3082140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:03:22.753 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:22 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:22.700+0000 7f65d3082140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:22.537+0000 7f03f4f4b140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: from numpy import show_config as show_numpy_config 2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:22.631+0000 7f03f4f4b140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:22.674+0000 7f03f4f4b140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-10T07:03:22.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:22 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:22.750+0000 7f03f4f4b140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-10T07:03:23.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.225+0000 7f65d3082140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:03:23.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.342+0000 7f65d3082140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:23.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.384+0000 7f65d3082140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T07:03:23.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.420+0000 7f65d3082140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:23.503 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.463+0000 7f65d3082140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:03:23.552 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.271+0000 7f03f4f4b140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-10T07:03:23.552 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.387+0000 7f03f4f4b140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:23.552 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.429+0000 7f03f4f4b140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-10T07:03:23.552 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.466+0000 7f03f4f4b140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:23.552 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.510+0000 7f03f4f4b140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-10T07:03:23.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.551+0000 7f03f4f4b140 -1 mgr[py] 
Module progress has missing NOTIFY_TYPES member 2026-03-10T07:03:23.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.737+0000 7f03f4f4b140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:03:23.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:23 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:23.789+0000 7f03f4f4b140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:23.988 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.509+0000 7f65d3082140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-10T07:03:23.988 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.694+0000 7f65d3082140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-10T07:03:23.988 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.748+0000 7f65d3082140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-10T07:03:24.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:23.986+0000 7f65d3082140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T07:03:24.316 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.018+0000 7f03f4f4b140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-10T07:03:24.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.315+0000 7f03f4f4b140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:03:24.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.354+0000 7f03f4f4b140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:03:24.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.398+0000 7f03f4f4b140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:03:24.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.481+0000 7f03f4f4b140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:03:24.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.522+0000 7f03f4f4b140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T07:03:24.605 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.297+0000 7f65d3082140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-10T07:03:24.606 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.337+0000 7f65d3082140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-10T07:03:24.606 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.379+0000 7f65d3082140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-10T07:03:24.606 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.462+0000 7f65d3082140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-10T07:03:24.606 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.511+0000 7f65d3082140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-10T07:03:24.875 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.610+0000 7f03f4f4b140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:03:24.875 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.730+0000 7f03f4f4b140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:24.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.604+0000 7f65d3082140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-10T07:03:24.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.732+0000 7f65d3082140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-10T07:03:25.206 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:25 vm02 ceph-mon[54377]: Active manager daemon y restarted 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:25 vm02 ceph-mon[54377]: Activating manager daemon y 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.874+0000 7f03f4f4b140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:24 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:24.914+0000 7f03f4f4b140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:25] ENGINE Bus STARTING 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: CherryPy Checker: 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: The Application mounted at '' has an empty config. 
2026-03-10T07:03:25.207 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:25 vm02 ceph-mon[86149]: Active manager daemon y restarted 2026-03-10T07:03:25.207 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:25 vm02 ceph-mon[86149]: Activating manager daemon y 2026-03-10T07:03:25.253 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.887+0000 7f65d3082140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:24.929+0000 7f65d3082140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: [10/Mar/2026:07:03:24] ENGINE Bus STARTING 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: CherryPy Checker: 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: The Application mounted at '' has an empty config. 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:25 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: [10/Mar/2026:07:03:25] ENGINE Serving on http://:::9283 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:03:25 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x[73978]: [10/Mar/2026:07:03:25] ENGINE Bus STARTED 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:25 vm05 ceph-mon[48591]: Active manager daemon y restarted 2026-03-10T07:03:25.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:25 vm05 ceph-mon[48591]: Activating manager daemon y 2026-03-10T07:03:25.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:25] ENGINE Serving on http://:::9283 2026-03-10T07:03:25.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:25 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:03:25] ENGINE Bus STARTED 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: mgrmap e30: y(active, starting, since 0.0873105s), standbys: x 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: Standby manager daemon x restarted 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: Standby manager daemon x started 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.? 
192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.? 192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: Manager daemon y is now available 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:26.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:26 vm05 ceph-mon[48591]: mgrmap e31: y(active, since 1.0905s), standbys: x 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: mgrmap e30: y(active, starting, since 0.0873105s), standbys: x 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: Standby manager daemon x restarted 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: Standby manager daemon x started 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.? 
192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.? 192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: Manager daemon y is now available 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[54377]: mgrmap e31: y(active, since 1.0905s), standbys: x 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:26.024+0000 7f03c12b6640 -1 mgr.server handle_report got status from non-daemon mon.a 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.? 192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: mgrmap e30: y(active, starting, since 0.0873105s), standbys: x 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: Standby manager daemon x restarted 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: Standby manager daemon x started 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.? 
192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.? 
192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.? 192.168.123.105:0/1244114256' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: Manager daemon y is now available 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-10T07:03:26.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:26 vm02 ceph-mon[86149]: mgrmap e31: y(active, since 1.0905s), standbys: x 2026-03-10T07:03:26.951 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:26.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching 
labels must be unique on one side" 2026-03-10T07:03:27.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:27 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:26] ENGINE Bus STARTING 2026-03-10T07:03:27.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:27 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:27 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:27 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:27 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[86149]: [10/Mar/2026:07:03:26] ENGINE Bus STARTING 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:26] ENGINE Bus STARTING 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:27.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:27 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:26] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:26] ENGINE Client ('192.168.123.102', 39686) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:26] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: [10/Mar/2026:07:03:26] ENGINE Bus STARTED 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.403 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:28.403 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:28 vm05 ceph-mon[48591]: mgrmap e32: y(active, since 2s), standbys: x 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: [10/Mar/2026:07:03:26] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: [10/Mar/2026:07:03:26] ENGINE Client ('192.168.123.102', 39686) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: [10/Mar/2026:07:03:26] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: [10/Mar/2026:07:03:26] ENGINE Bus STARTED 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[86149]: mgrmap e32: y(active, since 2s), standbys: x 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:26] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:26] ENGINE Client ('192.168.123.102', 39686) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:26] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T07:03:28.585 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: [10/Mar/2026:07:03:26] ENGINE Bus STARTED 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:28.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:28 vm02 ceph-mon[54377]: mgrmap e32: y(active, since 2s), standbys: x 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[54377]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: 
from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T07:03:29.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:29 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T07:03:29.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: Updating vm02:/etc/ceph/ceph.conf 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: Updating vm05:/etc/ceph/ceph.conf 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: Updating vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: Updating vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/config/ceph.conf 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config 
dump", "format": "json"}]: dispatch 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T07:03:29.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:29 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-10T07:03:30.021 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:29 vm02 systemd[1]: Stopping Ceph mon.c for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:30.292 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c[54373]: 2026-03-10T07:03:30.019+0000 7fd9f1bdb700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:03:30.292 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c[54373]: 2026-03-10T07:03:30.019+0000 7fd9f1bdb700 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-10T07:03:30.292 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 podman[87852]: 2026-03-10 07:03:30.218367904 +0000 UTC m=+0.217451724 container died d42e67599bdc846e6b6caef3aa5c296ea16b0193a8e579dc046e1a05723bf0c3 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, vcs-type=git, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, version=8, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, ceph=True, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb) 2026-03-10T07:03:30.292 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 podman[87852]: 2026-03-10 07:03:30.242148566 +0000 UTC m=+0.241232377 container remove d42e67599bdc846e6b6caef3aa5c296ea16b0193a8e579dc046e1a05723bf0c3 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, vcs-type=git, GIT_BRANCH=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, ceph=True, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-10T07:03:30.292 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 bash[87852]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c 2026-03-10T07:03:30.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.c.service: Deactivated successfully. 2026-03-10T07:03:30.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 systemd[1]: Stopped Ceph mon.c for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:30.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.c.service: Consumed 4.241s CPU time. 2026-03-10T07:03:30.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 systemd[1]: Starting Ceph mon.c for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
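The entries above show cephadm cycling mon.c for the upgrade: "mon ok-to-stop" is dispatched, systemd stops the old v17.2.0 container, podman reports it died and removes it, and systemd then starts a fresh container from the quay.ceph.io/ceph-ci/ceph target image. A minimal Python sketch for pulling those podman container events out of a log in this format follows; the default file name and the regular expression are assumptions based on the lines visible here, not part of teuthology's tooling:

    #!/usr/bin/env python3
    # Sketch only: list podman container lifecycle events (and the image used)
    # for each cephadm daemon, as relayed through journalctl lines like the ones above.
    import re
    import sys

    # Example line shape (abridged):
    #   2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:... podman[87966]: ...
    #   container create 959c5054b3d9... (image=quay.ceph.io/ceph-ci/ceph:e911bdeb..., name=..., ...)
    EVENT_RE = re.compile(
        r'INFO:journalctl@(?P<unit>ceph\.[^.]+\.[^.]+)\.[^:]+:.*?'
        r'container (?P<event>create|start|died|remove) (?P<cid>[0-9a-f]{12,64}) '
        r'\(image=(?P<image>[^,]+),'
    )

    def container_events(path):
        """Yield (daemon, event, short container id, image) tuples from a teuthology log."""
        with open(path) as fh:
            for line in fh:
                m = EVENT_RE.search(line)
                if m:
                    yield m['unit'], m['event'], m['cid'][:12], m['image']

    if __name__ == '__main__':
        log = sys.argv[1] if len(sys.argv) > 1 else 'teuthology.log'  # hypothetical file name
        for daemon, event, cid, image in container_events(log):
            print(f'{daemon:20} {event:7} {cid} {image}')

Run against the lines above, this would show mon.c's old container (quay.io/ceph/ceph@sha256:12a0...) dying and being removed, followed by a create/start pair against the e911bdeb ci image, confirming the daemon came back on the target image.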
2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 podman[87966]: 2026-03-10 07:03:30.599202954 +0000 UTC m=+0.020272165 container create 959c5054b3d979eeab9c66308ac0737005f2f2cf5cdfefc67da8e1d45942e1ea (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 podman[87966]: 2026-03-10 07:03:30.638183798 +0000 UTC m=+0.059253020 container init 959c5054b3d979eeab9c66308ac0737005f2f2cf5cdfefc67da8e1d45942e1ea (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 podman[87966]: 2026-03-10 07:03:30.641998879 +0000 UTC m=+0.063068081 container start 959c5054b3d979eeab9c66308ac0737005f2f2cf5cdfefc67da8e1d45942e1ea (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 bash[87966]: 959c5054b3d979eeab9c66308ac0737005f2f2cf5cdfefc67da8e1d45942e1ea 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 podman[87966]: 2026-03-10 07:03:30.591733112 +0000 UTC m=+0.012802314 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 systemd[1]: Started 
Ceph mon.c for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: pidfile_write: ignore empty --pid-file 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: load: jerasure load: lrc 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: RocksDB version: 7.9.2 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Git sha 0 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: DB SUMMARY 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: DB Session ID: WAIWW9W9SPGQTDT6Y6KY 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: CURRENT file: CURRENT 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: MANIFEST file: MANIFEST-000009 size: 316 Bytes 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 1, files: 000015.sst 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000013.log size: 10318197 ; 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.error_if_exists: 0 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.create_if_missing: 0 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.paranoid_checks: 1 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T07:03:31.086 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.env: 0x557f55565dc0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.fs: PosixFileSystem 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.info_log: 0x557f576e85c0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.statistics: (nil) 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.use_fsync: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_log_file_size: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.allow_fallocate: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.use_direct_reads: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.db_log_dir: 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.wal_dir: 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.db_write_buffer_size: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.write_buffer_manager: 0x557f576ed900 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 
ceph-mon[87980]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.unordered_write: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.row_cache: None 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.wal_filter: None 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.two_write_queues: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.wal_compression: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.atomic_flush: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.log_readahead_size: 0 2026-03-10T07:03:31.087 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T07:03:31.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_background_jobs: 2 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_background_compactions: -1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_subcompactions: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_open_files: -1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_background_flushes: -1 2026-03-10T07:03:31.088 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Compression algorithms supported: 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kZSTD supported: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kXpressCompression supported: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kBZip2Compression supported: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kLZ4Compression supported: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kZlibCompression supported: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: kSnappyCompression supported: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.merge_operator: 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_filter: None 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x557f576e85a0) 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: cache_index_and_filter_blocks: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T07:03:31.088 
INFO:journalctl@ceph.mon.c.vm02.stdout: pin_top_level_index_and_filter: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: index_type: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: data_block_index_type: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: index_shortening: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: checksum: 4 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: no_block_cache: 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_cache: 0x557f5770d350 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_cache_name: BinnedLRUCache 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_cache_options: 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: capacity : 536870912 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: num_shard_bits : 4 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: strict_capacity_limit : 0 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: high_pri_pool_ratio: 0.000 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_cache_compressed: (nil) 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: persistent_cache: (nil) 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_size: 4096 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_size_deviation: 10 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: block_restart_interval: 16 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: index_block_restart_interval: 1 2026-03-10T07:03:31.088 INFO:journalctl@ceph.mon.c.vm02.stdout: metadata_block_size: 4096 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: partition_filters: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: use_delta_encoding: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: filter_policy: bloomfilter 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: whole_key_filtering: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: verify_compression: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: read_amp_bytes_per_bit: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: format_version: 5 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: enable_index_compression: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: block_align: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: max_auto_readahead_size: 262144 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: prepopulate_block_cache: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: initial_auto_readahead_size: 8192 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_write_buffer_number: 2 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression: NoCompression 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.num_levels: 7 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T07:03:31.089 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T07:03:31.089 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.inplace_update_support: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.bloom_locality: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.max_successive_merges: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 
ceph-mon[87980]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.ttl: 2592000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.enable_blob_files: false 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.min_blob_size: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 15.sst 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 17, last_sequence is 7726, log_number is 13,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 13 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: e4e3137f-06cd-4c2d-be41-0b0a8feed9a1 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126210671771, "job": 1, "event": "recovery_started", "wal_files": [13]} 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #13 mode 2 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126210698545, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 18, "file_size": 5963427, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 7731, "largest_seqno": 10180, "table_properties": {"data_size": 5952380, "index_size": 7382, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 2629, "raw_key_size": 24655, "raw_average_key_size": 24, "raw_value_size": 5929710, "raw_average_value_size": 5779, "num_data_blocks": 345, "num_entries": 1026, "num_filter_entries": 1026, "num_deletions": 2, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773126210, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "e4e3137f-06cd-4c2d-be41-0b0a8feed9a1", "db_session_id": "WAIWW9W9SPGQTDT6Y6KY", "orig_file_number": 18, "seqno_to_time_mapping": "N/A"}} 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126210698668, "job": 1, "event": "recovery_finished"} 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/version_set.cc:5047] Creating manifest 20 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000013.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x557f5770ee00 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: DB pointer 0x557f57824000 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: ** DB Stats ** 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:31.090 INFO:journalctl@ceph.mon.c.vm02.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: ** Compaction Stats [default] ** 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: L0 1/0 5.69 MB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 352.5 0.02 0.00 1 0.016 0 0 0.0 0.0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: L6 1/0 7.78 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Sum 2/0 13.47 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 352.5 0.02 0.00 1 0.016 0 0 0.0 0.0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 352.5 0.02 0.00 1 0.016 0 0 0.0 0.0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: ** Compaction Stats [default] ** 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 352.5 0.02 0.00 1 0.016 0 0 0.0 0.0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Flush(GB): cumulative 0.006, interval 0.006 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Cumulative compaction: 0.01 GB write, 153.11 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Interval compaction: 0.01 GB write, 153.11 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Block cache BinnedLRUCache@0x557f5770d350#2 capacity: 512.00 MB usage: 50.20 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.2e-05 secs_since: 0 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: Block cache entry stats(count,size,portion): DataBlock(3,15.23 KB,0.00290573%) FilterBlock(2,10.81 KB,0.00206232%) IndexBlock(2,24.16 KB,0.00460744%) Misc(1,0.00 KB,0%) 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: starting mon.c rank 1 at public addrs [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] at bind addrs [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???) 
e3 preinit fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).mds e1 new map 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).mds e1 print_map 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: e1 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: legacy client fscid: -1 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout: No filesystems configured 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).osd e86 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:31.091 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:30 vm02 ceph-mon[87980]: mon.c@-1(???).paxosservice(auth 1..21) refresh upgraded, format 0 -> 3 2026-03-10T07:03:31.984 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:31 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:03:31] "GET /metrics HTTP/1.1" 200 34999 "" "Prometheus/2.51.0" 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: mon.c calling monitor election 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: mon.a calling monitor election 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: monmap epoch 3 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: last_changed 2026-03-10T06:56:52.872880+0000 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:32.585 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: min_mon_release 17 (quincy) 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: election_strategy: 1 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: fsmap 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: mgrmap e33: y(active, since 6s), standbys: x 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: overall HEALTH_OK 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: mon.c calling monitor election 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: mon.a calling monitor election 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: monmap epoch 3 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: last_changed 2026-03-10T06:56:52.872880+0000 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: min_mon_release 17 (quincy) 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: election_strategy: 1 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: fsmap 
2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: mgrmap e33: y(active, since 6s), standbys: x 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: overall HEALTH_OK 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:32.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:32 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:32.686 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: mon.c calling monitor election 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: mon.a calling monitor election 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: monmap epoch 3 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: last_changed 2026-03-10T06:56:52.872880+0000 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: min_mon_release 17 (quincy) 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: election_strategy: 1 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: fsmap 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: mgrmap e33: y(active, since 6s), standbys: x 2026-03-10T07:03:32.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: overall HEALTH_OK 2026-03-10T07:03:32.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:32.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:32 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:33.153 
INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (40s) 0s ago 4m 20.4M - 0.25.0 c8568f914cd2 520cbcc5ad98 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (30s) 6s ago 4m 41.0M - dad864ee21e9 34567dcb4b51 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (19s) 0s ago 4m 45.5M - 3.5 e1d6a67b021e a06caff18850 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (16s) 6s ago 6m 485M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (29s) 0s ago 7m 540M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (11s) 0s ago 7m 38.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (6m) 6s ago 6m 51.8M 2048M 17.2.0 e1d6a67b021e c223c571b7ce 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (2s) 0s ago 6m 28.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (43s) 0s ago 5m 9038k - 1.7.0 72c9c2088986 0bc0b34c732a 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (36s) 6s ago 5m 8749k - 1.7.0 72c9c2088986 0129ed456f9d 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (6m) 0s ago 6m 51.5M 4096M 17.2.0 e1d6a67b021e 0e972212a251 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (6m) 0s ago 6m 56.0M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (6m) 0s ago 6m 50.0M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (5m) 0s ago 5m 49.7M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (5m) 6s ago 5m 52.7M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (5m) 6s ago 5m 50.6M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (5m) 6s ago 5m 53.2M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (5m) 6s ago 5m 51.5M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (18s) 6s ago 5m 40.3M - 2.51.0 1d3b7f56885b 2f14ff887bc7 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (4m) 0s ago 4m 93.6M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (4m) 6s ago 4m 93.5M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:03:33.153 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (4m) 0s ago 4m 91.7M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:03:33.153 
INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (4m) 6s ago 4m 93.3M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:03:33.454 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1, 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:03:33.455 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [ 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "mgr" 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: ], 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "4/23 daemons upgraded", 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading mon daemons", 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false 2026-03-10T07:03:33.730 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='client.24926 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='client.34140 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='client.34146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/2419383302' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='client.24926 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='client.34140 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='client.34146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.005 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:33 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/2419383302' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:34.005 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='client.24926 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='client.34140 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='client.34146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:34.148 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:33 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/2419383302' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:34.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:34.877 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='client.34155 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:34.877 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='client.? 192.168.123.102:0/3471290502' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:03:34.877 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:35.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:35.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:35.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:35.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T07:03:35.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:34 vm05 ceph-mon[48591]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T07:03:35.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='client.34155 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:35.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3471290502' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:03:35.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:35.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='client.34155 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/3471290502' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-10T07:03:35.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-10T07:03:35.650 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 systemd[1]: Stopping Ceph mon.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b[48587]: 2026-03-10T07:03:35.648+0000 7f5c40570700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b[48587]: 2026-03-10T07:03:35.648+0000 7f5c40570700 -1 mon.b@2(peon) e3 *** Got Signal Terminated *** 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 podman[76482]: 2026-03-10 07:03:35.715870998 +0000 UTC m=+0.081009696 container died c223c571b7cee0ec9607e37022ecf695a58864a24adf9c32b89f2757ba8f8a49 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8, distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., ceph=True, architecture=x86_64, GIT_CLEAN=True, RELEASE=HEAD, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, release=754, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0) 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 podman[76482]: 2026-03-10 07:03:35.73512044 +0000 UTC m=+0.100259138 container remove c223c571b7cee0ec9607e37022ecf695a58864a24adf9c32b89f2757ba8f8a49 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, ceph=True, RELEASE=HEAD, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., distribution-scope=public, version=8, build-date=2022-05-03T08:36:31.336870, io.buildah.version=1.19.8, name=centos-stream, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_CLEAN=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, maintainer=Guillaume Abrioux , io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD) 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 bash[76482]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.b.service: Deactivated successfully. 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 systemd[1]: Stopped Ceph mon.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.b.service: Consumed 5.612s CPU time. 2026-03-10T07:03:35.975 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:35 vm05 systemd[1]: Starting Ceph mon.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:03:36.039 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:35 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:35.707+0000 7f03c12b6640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 podman[76589]: 2026-03-10 07:03:36.079846997 +0000 UTC m=+0.017942918 container create e901cead026d8f9cf45cbc65aa6e3098096c69ecb92fe6da5c6e5904b52f52a6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3) 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 podman[76589]: 2026-03-10 07:03:36.115654575 +0000 UTC m=+0.053750505 container init e901cead026d8f9cf45cbc65aa6e3098096c69ecb92fe6da5c6e5904b52f52a6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default) 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 podman[76589]: 2026-03-10 07:03:36.119853472 +0000 UTC m=+0.057949393 container start e901cead026d8f9cf45cbc65aa6e3098096c69ecb92fe6da5c6e5904b52f52a6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 bash[76589]: e901cead026d8f9cf45cbc65aa6e3098096c69ecb92fe6da5c6e5904b52f52a6 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 podman[76589]: 2026-03-10 07:03:36.072811401 +0000 UTC m=+0.010907331 
image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 systemd[1]: Started Ceph mon.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: set uid:gid to 167:167 (ceph:ceph) 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: pidfile_write: ignore empty --pid-file 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: load: jerasure load: lrc 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: RocksDB version: 7.9.2 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Git sha 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: DB SUMMARY 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: DB Session ID: GNTOYB2PN4TD0J7QURNY 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: CURRENT file: CURRENT 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: IDENTITY file: IDENTITY 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: MANIFEST file: MANIFEST-000009 size: 503 Bytes 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000018.sst 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000016.log size: 2595 ; 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.error_if_exists: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.create_if_missing: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.paranoid_checks: 1 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.env: 0x561865d78dc0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.fs: PosixFileSystem 
2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.info_log: 0x5618678d85c0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_file_opening_threads: 16 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.statistics: (nil) 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.use_fsync: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_log_file_size: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.keep_log_file_num: 1000 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.recycle_log_file_num: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.allow_fallocate: 1 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.allow_mmap_reads: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.allow_mmap_writes: 0 2026-03-10T07:03:36.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.use_direct_reads: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.create_missing_column_families: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.db_log_dir: 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.wal_dir: 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.advise_random_on_open: 1 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: 
Options.db_write_buffer_size: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.write_buffer_manager: 0x5618678dd900 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.rate_limiter: (nil) 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.wal_recovery_mode: 2 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.enable_thread_tracking: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.enable_pipelined_write: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.unordered_write: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.row_cache: None 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.wal_filter: None 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.allow_ingest_behind: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.two_write_queues: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.manual_wal_flush: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.wal_compression: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.atomic_flush: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-10T07:03:36.255 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.log_readahead_size: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.best_efforts_recovery: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.allow_data_in_errors: 0 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.db_host_id: __hostname__ 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_background_jobs: 2 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_background_compactions: -1 2026-03-10T07:03:36.255 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_subcompactions: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_total_wal_size: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_open_files: -1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bytes_per_sync: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T07:03:36.256 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_readahead_size: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_background_flushes: -1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Compression algorithms supported: 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kZSTD supported: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kXpressCompression supported: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kBZip2Compression supported: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kLZ4Compression supported: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kZlibCompression supported: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kLZ4HCCompression supported: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: kSnappyCompression supported: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.merge_operator: 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_filter: None 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_filter_factory: None 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.sst_partitioner_factory: None 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5618678d85a0) 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: 
cache_index_and_filter_blocks: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: pin_top_level_index_and_filter: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: index_type: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: data_block_index_type: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: index_shortening: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: checksum: 4 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: no_block_cache: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_cache: 0x5618678fd350 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_cache_name: BinnedLRUCache 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_cache_options: 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: capacity : 536870912 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: num_shard_bits : 4 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: strict_capacity_limit : 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: high_pri_pool_ratio: 0.000 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_cache_compressed: (nil) 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: persistent_cache: (nil) 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_size: 4096 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_size_deviation: 10 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: block_restart_interval: 16 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: index_block_restart_interval: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: metadata_block_size: 4096 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: partition_filters: 0 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: use_delta_encoding: 1 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: filter_policy: bloomfilter 2026-03-10T07:03:36.256 INFO:journalctl@ceph.mon.b.vm05.stdout: whole_key_filtering: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: verify_compression: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: read_amp_bytes_per_bit: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: format_version: 5 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: enable_index_compression: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: block_align: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: max_auto_readahead_size: 262144 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: prepopulate_block_cache: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: initial_auto_readahead_size: 8192 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout: num_file_reads_for_auto_readahead: 2 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.write_buffer_size: 33554432 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: 
Options.max_write_buffer_number: 2 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression: NoCompression 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression: Disabled 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.prefix_extractor: nullptr 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.num_levels: 7 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.level: 32767 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.strategy: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 
10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.enabled: false 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.target_file_size_base: 67108864 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.arena_block_size: 1048576 2026-03-10T07:03:36.257 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.disable_auto_compactions: 0 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T07:03:36.257 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.inplace_update_support: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.bloom_locality: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.max_successive_merges: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: 
Options.optimize_filters_for_hits: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.paranoid_file_checks: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.force_consistency_checks: 1 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.report_bg_io_stats: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.ttl: 2592000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.enable_blob_files: false 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.min_blob_size: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.blob_file_size: 268435456 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.blob_file_starting_level: 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 18.sst 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 20, last_sequence is 10517, log_number is 16,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 16 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 52417cef-587a-45f6-8326-abc28943792e 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126216142479, "job": 1, "event": "recovery_started", "wal_files": [16]} 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #16 mode 2 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126216143333, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 21, "file_size": 2908, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 10519, "largest_seqno": 10529, "table_properties": {"data_size": 1785, "index_size": 33, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 351, "raw_average_key_size": 31, "raw_value_size": 1471, "raw_average_value_size": 133, "num_data_blocks": 1, "num_entries": 11, "num_filter_entries": 11, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773126216, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "52417cef-587a-45f6-8326-abc28943792e", "db_session_id": "GNTOYB2PN4TD0J7QURNY", "orig_file_number": 21, "seqno_to_time_mapping": "N/A"}} 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773126216143384, "job": 1, "event": "recovery_finished"} 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/version_set.cc:5047] Creating manifest 23 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000016.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5618678fee00 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: DB pointer 0x561867a14000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: starting mon.b rank 2 at public addrs [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] at bind addrs [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???) e3 preinit fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).mds e1 new map 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).mds e1 print_map 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: e1 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: legacy client fscid: -1 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout: No filesystems configured 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).osd e86 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-10T07:03:36.258 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).paxosservice(auth 1..21) refresh upgraded, format 0 -> 3 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: 
mon.b@-1(???).mgr e0 loading version 33 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).mgr e33 active server: [v2:192.168.123.102:6800/696079514,v1:192.168.123.102:6801/696079514](24905) 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: mon.b@-1(???).mgr e33 mkfs or daemon transitioned to available, loading commands 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:36 vm05 ceph-mon[76604]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: ** DB Stats ** 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: ** Compaction Stats [default] ** 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: L0 1/0 2.84 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 3.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: L6 1/0 11.42 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Sum 2/0 11.43 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 3.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 3.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: ** Compaction Stats [default] ** 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.7 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Cumulative compaction: 0.00 GB write, 0.21 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Interval compaction: 0.00 GB write, 0.21 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Block cache BinnedLRUCache@0x5618678fd350#2 capacity: 512.00 MB usage: 484.73 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 6e-06 secs_since: 0 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: Block cache entry stats(count,size,portion): DataBlock(38,455.39 KB,0.0868589%) FilterBlock(2,9.14 KB,0.00174344%) IndexBlock(2,20.20 KB,0.00385344%) Misc(1,0.00 KB,0%) 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: 2026-03-10T07:03:36.259 INFO:journalctl@ceph.mon.b.vm05.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-10T07:03:37.245 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: 
[{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: mon.c calling monitor election 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: mon.b calling monitor election 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: mon.a calling monitor election 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: monmap epoch 4 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: last_changed 2026-03-10T07:03:36.323727+0000 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: min_mon_release 19 (squid) 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: election_strategy: 1 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: fsmap 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: mgrmap e33: y(active, since 11s), standbys: x 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: overall HEALTH_OK 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:37.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: mon.c calling monitor election 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: mon.b calling monitor election 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: mon.a calling monitor election 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: monmap epoch 4 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: last_changed 2026-03-10T07:03:36.323727+0000 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: min_mon_release 19 (squid) 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: election_strategy: 1 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: fsmap 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: mgrmap e33: y(active, since 11s), standbys: x 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: overall HEALTH_OK 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: mon.c calling monitor election 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: mon.b calling monitor election 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: mon.a calling monitor election 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: monmap epoch 4 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: last_changed 2026-03-10T07:03:36.323727+0000 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: created 2026-03-10T06:56:10.631622+0000 2026-03-10T07:03:37.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: min_mon_release 19 (squid) 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: election_strategy: 1 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.a 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: 1: [v2:192.168.123.102:3301/0,v1:192.168.123.102:6790/0] mon.c 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: 2: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.b 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: fsmap 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: osdmap e86: 8 total, 8 up, 8 in 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: mgrmap e33: y(active, since 11s), standbys: x 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: overall HEALTH_OK 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-10T07:03:37.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:38 vm05 ceph-mon[76604]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:38.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:38 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:38 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:38 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:38 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[87980]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[86149]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:38 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.236 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:03:40.236 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' 2026-03-10T07:03:40.236 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: Reconfiguring mon.a (monmap changed)... 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: Reconfiguring daemon mon.a on vm02 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: Reconfiguring mgr.y (monmap changed)... 
2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: Reconfiguring daemon mgr.y on vm02 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: Reconfiguring mon.a (monmap changed)... 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: Reconfiguring daemon mon.a on vm02 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: Reconfiguring mgr.y (monmap changed)... 
2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: Reconfiguring daemon mgr.y on vm02 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.237 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: Reconfiguring mon.a (monmap changed)... 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: Reconfiguring daemon mon.a on vm02 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: Reconfiguring mgr.y (monmap changed)... 
2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: Reconfiguring daemon mgr.y on vm02 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:40.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:41.552 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:41.160+0000 7f03c12b6640 -1 mgr.server handle_report got status from non-daemon mon.b 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: Reconfiguring mon.c (monmap changed)... 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: Reconfiguring daemon mon.c on vm02 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: Reconfiguring osd.0 (monmap changed)... 
2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: Reconfiguring daemon osd.0 on vm02 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: Reconfiguring osd.1 (monmap changed)... 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: Reconfiguring daemon osd.1 on vm02 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:03:41] "GET /metrics HTTP/1.1" 200 37756 "" "Prometheus/2.51.0" 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: Reconfiguring mon.c (monmap changed)... 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: Reconfiguring daemon mon.c on vm02 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: Reconfiguring osd.0 (monmap changed)... 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: Reconfiguring daemon osd.0 on vm02 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: Reconfiguring osd.1 (monmap changed)... 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: Reconfiguring daemon osd.1 on vm02 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:41.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:41 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: Reconfiguring mon.c (monmap changed)... 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: Reconfiguring daemon mon.c on vm02 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: Reconfiguring osd.0 (monmap changed)... 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: Reconfiguring daemon osd.0 on vm02 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: Reconfiguring osd.1 (monmap changed)... 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: Reconfiguring daemon osd.1 on vm02 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:42.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:41 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: Reconfiguring osd.2 (monmap changed)... 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: Reconfiguring daemon osd.2 on vm02 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: Reconfiguring osd.3 (monmap changed)... 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: Reconfiguring daemon osd.3 on vm02 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: Reconfiguring rgw.foo.vm02.kkmsll (monmap changed)... 
2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: Reconfiguring daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T07:03:42.698 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: Reconfiguring osd.2 (monmap changed)... 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: Reconfiguring daemon osd.2 on vm02 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: Reconfiguring osd.3 (monmap changed)... 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: Reconfiguring daemon osd.3 on vm02 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: Reconfiguring rgw.foo.vm02.kkmsll (monmap changed)... 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: Reconfiguring daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: Reconfiguring osd.2 (monmap changed)... 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: Reconfiguring daemon osd.2 on vm02 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: Reconfiguring osd.3 (monmap changed)... 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: Reconfiguring daemon osd.3 on vm02 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: Reconfiguring rgw.foo.vm02.kkmsll (monmap changed)... 
2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: Reconfiguring daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:43.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T07:03:43.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:42 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.597 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring rgw.smpl.vm02.kyvfxo (monmap changed)... 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring mon.b (monmap changed)... 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring daemon mon.b on vm05 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring daemon mgr.x on vm05 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: Reconfiguring daemon osd.4 on vm05 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T07:03:43.598 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 
07:03:43 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring rgw.smpl.vm02.kyvfxo (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring mon.b (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring daemon mon.b on vm05 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring daemon mgr.x on vm05 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: Reconfiguring daemon osd.4 on vm05 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring rgw.smpl.vm02.kyvfxo (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring mon.b (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring daemon mon.b on vm05 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring mgr.x (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring daemon mgr.x on vm05 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring osd.4 (monmap changed)... 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: Reconfiguring daemon osd.4 on vm05 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T07:03:44.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:43 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.182 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:44 vm05 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:44.150Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:44.831 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T07:03:44.831 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: Reconfiguring daemon osd.5 on vm05 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: Reconfiguring osd.6 (monmap changed)... 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: Reconfiguring daemon osd.6 on vm05 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: Reconfiguring osd.7 (monmap changed)... 
2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: Reconfiguring daemon osd.7 on vm05 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 
vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: Reconfiguring daemon osd.5 on vm05 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: Reconfiguring osd.6 (monmap changed)... 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: Reconfiguring daemon osd.6 on vm05 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: Reconfiguring osd.7 (monmap changed)... 
2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: Reconfiguring daemon osd.7 on vm05 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 
vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T07:03:44.832 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:44.833 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:44.833 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:44 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: Reconfiguring osd.5 (monmap changed)... 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: Reconfiguring daemon osd.5 on vm05 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: Reconfiguring osd.6 (monmap changed)... 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: Reconfiguring daemon osd.6 on vm05 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: Reconfiguring osd.7 (monmap changed)... 
2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: Reconfiguring daemon osd.7 on vm05 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 
vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-10T07:03:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-10T07:03:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-10T07:03:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:44 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 systemd[1]: Stopping Ceph osd.0 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:45.705 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[57409]: 2026-03-10T07:03:45.493+0000 7f3eb0737700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:03:45.705 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[57409]: 2026-03-10T07:03:45.493+0000 7f3eb0737700 -1 osd.0 86 *** Got signal Terminated *** 2026-03-10T07:03:45.705 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[57409]: 2026-03-10T07:03:45.493+0000 7f3eb0737700 -1 osd.0 86 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Reconfiguring rgw.foo.vm05.bmslvs (monmap changed)... 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Reconfiguring daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Reconfiguring rgw.smpl.vm05.xjafam (monmap changed)... 
2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Reconfiguring daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Upgrade: Setting container_image for all mon 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Upgrade: Setting container_image for all crash 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: Upgrade: osd.0 is safe to restart 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[87980]: osd.0 marked itself down and dead 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Reconfiguring rgw.foo.vm05.bmslvs (monmap changed)... 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Reconfiguring daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Reconfiguring rgw.smpl.vm05.xjafam (monmap changed)... 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Reconfiguring daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all mon 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all crash 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: Upgrade: osd.0 is safe to restart 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:45.705 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:45 vm02 ceph-mon[86149]: osd.0 marked itself down and dead 2026-03-10T07:03:45.965 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90031]: 2026-03-10 07:03:45.702816727 +0000 UTC m=+0.223060872 container died 0e972212a25170ce9e4146e2ff2b4b6dd69768d74b509e687d2361884efa718d (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, maintainer=Guillaume Abrioux , com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, distribution-scope=public, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, GIT_BRANCH=HEAD, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-type=git, io.buildah.version=1.19.8, RELEASE=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, ceph=True, name=centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb) 2026-03-10T07:03:45.965 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90031]: 2026-03-10 07:03:45.727124586 +0000 UTC m=+0.247368731 container remove 0e972212a25170ce9e4146e2ff2b4b6dd69768d74b509e687d2361884efa718d (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, ceph=True, com.redhat.component=centos-stream-container, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, RELEASE=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., io.openshift.expose-services=, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.k8s.display-name=CentOS Stream 8, distribution-scope=public, build-date=2022-05-03T08:36:31.336870, release=754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, architecture=x86_64) 2026-03-10T07:03:45.965 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 bash[90031]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0 2026-03-10T07:03:45.965 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90099]: 2026-03-10 07:03:45.873228841 +0000 UTC m=+0.016276085 container create 751153f49bd15facd593c720174a68dfe6eea6872d6dd2f089a6ded736a83202 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, OSD_FLAVOR=default, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True) 2026-03-10T07:03:45.965 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90099]: 2026-03-10 07:03:45.914517606 +0000 UTC m=+0.057564860 container init 751153f49bd15facd593c720174a68dfe6eea6872d6dd2f089a6ded736a83202 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223) 2026-03-10T07:03:45.966 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90099]: 2026-03-10 07:03:45.918299184 +0000 UTC m=+0.061346419 container start 751153f49bd15facd593c720174a68dfe6eea6872d6dd2f089a6ded736a83202 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS) 2026-03-10T07:03:45.966 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90099]: 2026-03-10 07:03:45.921275185 +0000 UTC m=+0.064322429 container attach 751153f49bd15facd593c720174a68dfe6eea6872d6dd2f089a6ded736a83202 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS) 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Reconfiguring rgw.foo.vm05.bmslvs (monmap changed)... 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Reconfiguring daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Reconfiguring rgw.smpl.vm05.xjafam (monmap changed)... 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Reconfiguring daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all mon 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all crash 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: Upgrade: osd.0 is safe to restart 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:46.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-10T07:03:46.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:46.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:45 vm05 ceph-mon[76604]: osd.0 marked itself down and dead 2026-03-10T07:03:46.226 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:45 vm02 podman[90099]: 2026-03-10 07:03:45.866804356 +0000 UTC m=+0.009851611 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:46.226 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90118]: 2026-03-10 07:03:46.080941595 +0000 UTC m=+0.010745223 container died 751153f49bd15facd593c720174a68dfe6eea6872d6dd2f089a6ded736a83202 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2) 2026-03-10T07:03:46.478 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90118]: 2026-03-10 07:03:46.273674108 +0000 UTC m=+0.203477736 container remove 751153f49bd15facd593c720174a68dfe6eea6872d6dd2f089a6ded736a83202 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:03:46.478 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.0.service: Deactivated successfully. 
2026-03-10T07:03:46.479 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 systemd[1]: Stopped Ceph osd.0 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:03:46.479 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.0.service: Consumed 3.054s CPU time. 2026-03-10T07:03:46.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[87980]: Upgrade: Updating osd.0 2026-03-10T07:03:46.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[87980]: Deploying daemon osd.0 on vm02 2026-03-10T07:03:46.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[87980]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:03:46.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:03:46.773 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[87980]: osdmap e87: 8 total, 7 up, 8 in 2026-03-10T07:03:46.773 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 systemd[1]: Starting Ceph osd.0 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:03:46.774 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90211]: 2026-03-10 07:03:46.608017417 +0000 UTC m=+0.039530273 container create 104f83db7b903b01abe41f261a5e414835049dfd223489f2efdd67617faeb2db (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate, io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:03:46.774 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90211]: 2026-03-10 07:03:46.580628581 +0000 UTC m=+0.012141447 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:46.774 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[86149]: Upgrade: Updating osd.0 2026-03-10T07:03:46.774 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[86149]: Deploying daemon osd.0 on vm02 2026-03-10T07:03:46.774 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[86149]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:03:46.774 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:03:46.774 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:46 vm02 ceph-mon[86149]: osdmap e87: 8 total, 7 up, 8 in 2026-03-10T07:03:46.904 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:46 vm05 ceph-mon[76604]: Upgrade: Updating osd.0 2026-03-10T07:03:46.904 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:46 vm05 ceph-mon[76604]: Deploying daemon osd.0 on vm02 
2026-03-10T07:03:46.904 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:46 vm05 ceph-mon[76604]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:03:46.904 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:46 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:03:46.904 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:46 vm05 ceph-mon[76604]: osdmap e87: 8 total, 7 up, 8 in 2026-03-10T07:03:47.085 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90211]: 2026-03-10 07:03:46.77373801 +0000 UTC m=+0.205250866 container init 104f83db7b903b01abe41f261a5e414835049dfd223489f2efdd67617faeb2db (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223) 2026-03-10T07:03:47.085 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90211]: 2026-03-10 07:03:46.785205284 +0000 UTC m=+0.216718130 container start 104f83db7b903b01abe41f261a5e414835049dfd223489f2efdd67617faeb2db (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True) 2026-03-10T07:03:47.085 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 podman[90211]: 2026-03-10 07:03:46.786387297 +0000 UTC m=+0.217900153 container attach 104f83db7b903b01abe41f261a5e414835049dfd223489f2efdd67617faeb2db (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-10T07:03:47.085 
INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.085 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 bash[90211]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.085 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.085 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:46 vm02 bash[90211]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.217 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: 
/usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b7e609d3-9e84-4cb6-a098-30d277531fff/osd-block-35204e1a-6579-424a-9923-9986832c655c --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-10T07:03:47.612 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b7e609d3-9e84-4cb6-a098-30d277531fff/osd-block-35204e1a-6579-424a-9923-9986832c655c --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-10T07:03:47.871 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:47 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:47.871 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:47 vm02 ceph-mon[87980]: osdmap e88: 8 total, 7 up, 8 in 2026-03-10T07:03:47.872 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:47 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:47.872 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:47 vm02 ceph-mon[86149]: osdmap e88: 8 total, 7 up, 8 in 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/ln -snf /dev/ceph-b7e609d3-9e84-4cb6-a098-30d277531fff/osd-block-35204e1a-6579-424a-9923-9986832c655c /var/lib/ceph/osd/ceph-0/block 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/ln -snf /dev/ceph-b7e609d3-9e84-4cb6-a098-30d277531fff/osd-block-35204e1a-6579-424a-9923-9986832c655c /var/lib/ceph/osd/ceph-0/block 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate[90229]: --> ceph-volume lvm activate successful for osd ID: 0 
2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90211]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 podman[90211]: 2026-03-10 07:03:47.726571277 +0000 UTC m=+1.158084123 container died 104f83db7b903b01abe41f261a5e414835049dfd223489f2efdd67617faeb2db (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 podman[90211]: 2026-03-10 07:03:47.747813608 +0000 UTC m=+1.179326465 container remove 104f83db7b903b01abe41f261a5e414835049dfd223489f2efdd67617faeb2db (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-10T07:03:47.875 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 podman[90483]: 2026-03-10 07:03:47.843668088 +0000 UTC m=+0.016047168 container create bf86ac25f7fbda38a41e101b3916217cdbf20636caaabd79bd5587d55d44ce66 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:03:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:47 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:48.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:47 vm05 ceph-mon[76604]: osdmap e88: 8 total, 7 up, 8 in 2026-03-10T07:03:48.227 
INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 podman[90483]: 2026-03-10 07:03:47.882870716 +0000 UTC m=+0.055249796 container init bf86ac25f7fbda38a41e101b3916217cdbf20636caaabd79bd5587d55d44ce66 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True) 2026-03-10T07:03:48.227 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 podman[90483]: 2026-03-10 07:03:47.889660455 +0000 UTC m=+0.062039535 container start bf86ac25f7fbda38a41e101b3916217cdbf20636caaabd79bd5587d55d44ce66 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:03:48.227 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 bash[90483]: bf86ac25f7fbda38a41e101b3916217cdbf20636caaabd79bd5587d55d44ce66 2026-03-10T07:03:48.227 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 podman[90483]: 2026-03-10 07:03:47.837329472 +0000 UTC m=+0.009708552 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:03:48.227 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:47 vm02 systemd[1]: Started Ceph osd.0 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[86149]: pgmap v16: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[86149]: Health check failed: Reduced data availability: 13 pgs inactive, 21 pgs peering (PG_AVAILABILITY) 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[87980]: pgmap v16: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[87980]: Health check failed: Reduced data availability: 13 pgs inactive, 21 pgs peering (PG_AVAILABILITY) 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:48.743 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:48 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:48.743 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:48 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:03:48.704+0000 7f52dad6c740 -1 Falling back to public interface 2026-03-10T07:03:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:48 vm05 ceph-mon[76604]: pgmap v16: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T07:03:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:48 vm05 ceph-mon[76604]: Health check failed: Reduced data availability: 13 pgs inactive, 21 pgs peering (PG_AVAILABILITY) 2026-03-10T07:03:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:48 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:48 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:49.835 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:03:49.547+0000 7f52dad6c740 -1 osd.0 0 read_superblock omap replica is missing. 
2026-03-10T07:03:49.835 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:49 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:03:49.583+0000 7f52dad6c740 -1 osd.0 86 log_to_monitors true 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[86149]: pgmap v17: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[87980]: pgmap v17: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[87980]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.234 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:50 vm05 ceph-mon[76604]: pgmap v17: 161 pgs: 58 peering, 103 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-10T07:03:50.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:50 vm05 ceph-mon[76604]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T07:03:50.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[86149]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-10T07:03:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:50.585 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:03:51.014+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: osdmap e89: 8 total, 7 up, 8 in 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:51.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:51 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:03:51.584 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:03:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:03:51.242+0000 7f52d2316640 -1 osd.0 86 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 
ceph-mon[86149]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: osdmap e89: 8 total, 7 up, 8 in 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: osdmap e89: 8 total, 7 up, 8 in 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
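[annotation] The mgr repeatedly dispatches "osd ok-to-stop" for osd.1 here, and earlier replied "(16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline)"; the upgrade only restarts osd.1 once that check passes. A hedged sketch of the same style of gate using the ceph CLI — the subcommand name comes from the mon log above, but its output format is not shown here, so this keys only off the exit status:

import subprocess
import time

def wait_until_ok_to_stop(osd_id: int, poll: int = 30) -> None:
    """Block until `ceph osd ok-to-stop <id>` exits 0 (a non-zero exit,
    as in the EBUSY reply above, is treated as 'not yet safe')."""
    while True:
        r = subprocess.run(["ceph", "osd", "ok-to-stop", str(osd_id)],
                           capture_output=True, text=True)
        if r.returncode == 0:
            return
        print(f"osd.{osd_id} not safe to stop yet: {r.stderr.strip()}")
        time.sleep(poll)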
2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:03:51.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:51 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:03:52.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:03:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:03:51] "GET /metrics HTTP/1.1" 200 37756 "" "Prometheus/2.51.0" 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[86149]: pgmap v19: 161 pgs: 9 active+undersized, 43 peering, 6 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 21/630 objects degraded (3.333%) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 21/630 objects degraded (3.333%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[87980]: pgmap v19: 161 pgs: 9 active+undersized, 43 peering, 6 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 21/630 objects degraded (3.333%) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 21/630 objects degraded (3.333%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T07:03:52.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:52 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:03:53.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:52 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:03:53.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:52 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:03:53.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:52 vm05 ceph-mon[76604]: pgmap v19: 161 pgs: 9 active+undersized, 43 peering, 6 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 21/630 objects degraded (3.333%) 2026-03-10T07:03:53.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:52 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 21/630 objects degraded (3.333%), 6 pgs degraded (PG_DEGRADED) 2026-03-10T07:03:53.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:52 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[87980]: OSD bench result of 27732.565735 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[87980]: osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067] boot 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[87980]: osdmap e90: 8 total, 8 up, 8 in 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[86149]: OSD bench result of 27732.565735 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
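[annotation] The mclock warning above (bench result outside the threshold range, capacity left at 315 IOPS) recommends measuring the device with an external benchmark and overriding osd_mclock_max_capacity_iops_[hdd|ssd]. A sketch of how such an override could be applied with `ceph config set`; the option name is taken from the warning text and the value is a placeholder, not a measurement:

import subprocess

# Illustrative only: apply a manually measured IOPS capacity for osd.0,
# as the warning above recommends (5000 is a placeholder value).
measured_iops = 5000
subprocess.run(["ceph", "config", "set", "osd.0",
                "osd_mclock_max_capacity_iops_hdd", str(measured_iops)],
               check=True)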
2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[86149]: osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067] boot 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[86149]: osdmap e90: 8 total, 8 up, 8 in 2026-03-10T07:03:53.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:53 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:53 vm05 ceph-mon[76604]: OSD bench result of 27732.565735 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:03:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:53 vm05 ceph-mon[76604]: osd.0 [v2:192.168.123.102:6802/2638051067,v1:192.168.123.102:6803/2638051067] boot 2026-03-10T07:03:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:53 vm05 ceph-mon[76604]: osdmap e90: 8 total, 8 up, 8 in 2026-03-10T07:03:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:53 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T07:03:54.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:54.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:54 vm02 ceph-mon[86149]: pgmap v21: 161 pgs: 35 active+undersized, 23 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 805 B/s rd, 0 op/s; 70/630 objects degraded (11.111%) 2026-03-10T07:03:54.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:54 vm02 ceph-mon[86149]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 11 pgs inactive, 17 pgs peering) 2026-03-10T07:03:54.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:54 vm02 ceph-mon[86149]: osdmap e91: 8 total, 8 up, 8 in 2026-03-10T07:03:54.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:54 vm02 ceph-mon[87980]: pgmap v21: 161 pgs: 35 active+undersized, 23 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 805 B/s rd, 0 op/s; 70/630 objects degraded (11.111%) 2026-03-10T07:03:54.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:54 vm02 ceph-mon[87980]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 11 pgs inactive, 17 pgs peering) 2026-03-10T07:03:54.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:54 vm02 ceph-mon[87980]: osdmap e91: 8 total, 8 up, 8 in 2026-03-10T07:03:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:54 vm05 ceph-mon[76604]: pgmap v21: 161 pgs: 35 active+undersized, 23 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 805 B/s rd, 0 op/s; 70/630 objects degraded (11.111%) 2026-03-10T07:03:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:54 vm05 ceph-mon[76604]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 11 pgs inactive, 17 pgs peering) 2026-03-10T07:03:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:54 vm05 ceph-mon[76604]: osdmap e91: 8 total, 8 up, 8 in 2026-03-10T07:03:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:56 vm02 ceph-mon[87980]: pgmap v23: 161 pgs: 35 active+undersized, 23 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 70/630 objects degraded (11.111%) 2026-03-10T07:03:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:56.335 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:56 vm02 ceph-mon[86149]: pgmap v23: 161 pgs: 35 active+undersized, 23 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 70/630 objects degraded (11.111%) 2026-03-10T07:03:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:56 vm05 ceph-mon[76604]: pgmap v23: 161 pgs: 35 active+undersized, 23 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 70/630 objects degraded (11.111%) 2026-03-10T07:03:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:03:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:03:57.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:57 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:57.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:03:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:03:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:03:57.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:57 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:57.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:57 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:03:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:58 vm05 ceph-mon[76604]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:58 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 70/630 objects degraded (11.111%), 23 pgs degraded) 2026-03-10T07:03:58.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:03:58 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:03:58.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:58 vm02 ceph-mon[87980]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:58 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 70/630 objects degraded (11.111%), 23 pgs degraded) 2026-03-10T07:03:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:03:58 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:03:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:58 vm02 ceph-mon[86149]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-10T07:03:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:58 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 70/630 objects degraded (11.111%), 23 pgs degraded) 2026-03-10T07:03:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:03:58 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:04:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:00 vm05 ceph-mon[76604]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:00 vm02 ceph-mon[87980]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:00 vm02 ceph-mon[86149]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:02.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:01 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:04:01] "GET /metrics HTTP/1.1" 200 37767 "" "Prometheus/2.51.0" 2026-03-10T07:04:02.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:02 vm05 ceph-mon[76604]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 603 B/s rd, 0 op/s 2026-03-10T07:04:02.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:02 vm02 ceph-mon[87980]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 603 B/s rd, 0 op/s 2026-03-10T07:04:02.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:02 vm02 ceph-mon[86149]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 603 B/s rd, 0 op/s 2026-03-10T07:04:04.222 INFO:teuthology.orchestra.run.vm02.stdout:true 
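[annotation] The lone "true" on the orchestra stdout above is the in_progress flag from the upgrade-status poll, and the JSON printed a little further down shows the fields being inspected (target_image, in_progress, services_complete, progress, message, is_paused). A minimal polling sketch in the same spirit, assuming `ceph orch upgrade status` emits JSON in the shape shown below:

import json
import subprocess
import time

def poll_upgrade(interval: int = 30) -> dict:
    """Poll `ceph orch upgrade status` until in_progress clears or the
    message contains 'Error' (field names as in the JSON output below)."""
    while True:
        out = subprocess.run(["ceph", "orch", "upgrade", "status"],
                             capture_output=True, text=True, check=True).stdout
        status = json.loads(out)
        print(status.get("progress"), "-", status.get("message"))
        if not status.get("in_progress") or "Error" in (status.get("message") or ""):
            return status
        time.sleep(interval)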
2026-03-10T07:04:04.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:04 vm05 ceph-mon[76604]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:04.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:04.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:04 vm02 ceph-mon[86149]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:04.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:04 vm02 ceph-mon[87980]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (71s) 15s ago 5m 22.5M - 0.25.0 c8568f914cd2 520cbcc5ad98 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (61s) 27s ago 5m 42.2M - dad864ee21e9 34567dcb4b51 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (50s) 15s ago 5m 45.9M - 3.5 e1d6a67b021e a06caff18850 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (48s) 27s ago 7m 485M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 
*:8443,9283,8765 running (60s) 15s ago 7m 548M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (43s) 15s ago 7m 43.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (28s) 27s ago 7m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (34s) 15s ago 7m 38.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (75s) 15s ago 5m 9256k - 1.7.0 72c9c2088986 0bc0b34c732a 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (68s) 27s ago 5m 8946k - 1.7.0 72c9c2088986 0129ed456f9d 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (16s) 15s ago 6m 12.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (6m) 15s ago 6m 56.8M 4096M 17.2.0 e1d6a67b021e a9f4c8b44f48 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (6m) 15s ago 6m 50.7M 4096M 17.2.0 e1d6a67b021e 6409c5be18aa 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (6m) 15s ago 6m 50.2M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (6m) 27s ago 6m 53.0M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (6m) 27s ago 6m 50.9M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (6m) 27s ago 6m 53.6M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (5m) 27s ago 5m 52.2M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (49s) 27s ago 5m 40.4M - 2.51.0 1d3b7f56885b 2f14ff887bc7 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (5m) 15s ago 5m 93.9M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (5m) 27s ago 5m 93.8M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (5m) 15s ago 5m 91.8M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:04:04.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (5m) 27s ago 5m 93.7M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:04:04.831 
INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 7, 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 11, 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:04:04.831 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [ 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "mgr", 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "mon" 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: ], 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "6/23 daemons upgraded", 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false 2026-03-10T07:04:05.034 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:04:05.271 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:04:05.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[87980]: from='client.34170 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[87980]: from='client.44104 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[87980]: from='client.44110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/2241412559' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:05.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[86149]: from='client.34170 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[86149]: from='client.44104 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[86149]: from='client.44110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/2241412559' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:05.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:05 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:05.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:05 vm05 ceph-mon[76604]: from='client.34170 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:05 vm05 ceph-mon[76604]: from='client.44104 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:05 vm05 ceph-mon[76604]: from='client.44110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:05.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:05 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/2241412559' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:05.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:05 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:06.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[86149]: from='client.44119 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:06.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[86149]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 892 B/s rd, 0 op/s 2026-03-10T07:04:06.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/1508046062' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:04:06.336 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:04:06.340 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[87980]: from='client.44119 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:06.340 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[87980]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 892 B/s rd, 0 op/s 2026-03-10T07:04:06.340 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/1508046062' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:04:06.340 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:06 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:04:06.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:06 vm05 ceph-mon[76604]: from='client.44119 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:06.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:06 vm05 ceph-mon[76604]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 892 B/s rd, 0 op/s 2026-03-10T07:04:06.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:06 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/1508046062' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:04:06.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:06 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:04:07.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", 
sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:07.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:06 vm02 systemd[1]: Stopping Ceph osd.1 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:04:07.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[60137]: 2026-03-10T07:04:07.039+0000 7efc4183a700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:04:07.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[60137]: 2026-03-10T07:04:07.039+0000 7efc4183a700 -1 osd.1 91 *** Got signal Terminated *** 2026-03-10T07:04:07.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[60137]: 2026-03-10T07:04:07.039+0000 7efc4183a700 -1 osd.1 91 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: Upgrade: osd.1 is safe to restart 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: Upgrade: Updating osd.1 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: Deploying daemon osd.1 on vm02 2026-03-10T07:04:07.644 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[87980]: osd.1 marked itself down and dead 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92143]: 2026-03-10 07:04:07.398324838 +0000 UTC m=+0.371134885 container died a9f4c8b44f48d646cde417f8db53b511c30e7ab305530282fad752b034f4b5d7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, CEPH_POINT_RELEASE=-17.2.0, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, version=8, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, maintainer=Guillaume Abrioux , vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, io.openshift.expose-services=, vendor=Red Hat, Inc., GIT_CLEAN=True, name=centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, release=754, GIT_BRANCH=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, RELEASE=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92143]: 2026-03-10 07:04:07.425471201 +0000 UTC m=+0.398281248 container remove a9f4c8b44f48d646cde417f8db53b511c30e7ab305530282fad752b034f4b5d7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, architecture=x86_64, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., version=8, GIT_CLEAN=True, RELEASE=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, distribution-scope=public, vcs-type=git, com.redhat.component=centos-stream-container, io.openshift.expose-services=, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, build-date=2022-05-03T08:36:31.336870, io.openshift.tags=base centos centos-stream, release=754, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 bash[92143]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92210]: 2026-03-10 07:04:07.551508355 +0000 UTC m=+0.015445622 container create f9895750bfcfa7f703b7fd57dcb32b8c36880a75f2067ea8fb4561870c00077d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92210]: 2026-03-10 07:04:07.589845143 +0000 UTC m=+0.053782410 container init f9895750bfcfa7f703b7fd57dcb32b8c36880a75f2067ea8fb4561870c00077d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default) 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92210]: 2026-03-10 07:04:07.593987376 +0000 UTC m=+0.057924633 container start f9895750bfcfa7f703b7fd57dcb32b8c36880a75f2067ea8fb4561870c00077d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:07.644 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 
podman[92210]: 2026-03-10 07:04:07.595190699 +0000 UTC m=+0.059127966 container attach f9895750bfcfa7f703b7fd57dcb32b8c36880a75f2067ea8fb4561870c00077d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default) 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: Upgrade: osd.1 is safe to restart 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: Upgrade: Updating osd.1 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: Deploying daemon osd.1 on vm02 2026-03-10T07:04:07.645 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:07 vm02 ceph-mon[86149]: osd.1 marked itself down and dead 2026-03-10T07:04:07.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-10T07:04:07.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: Upgrade: osd.1 is safe to restart 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: Upgrade: Updating osd.1 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: Deploying daemon osd.1 on vm02 2026-03-10T07:04:07.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:07 vm05 ceph-mon[76604]: osd.1 marked itself down and dead 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92210]: 2026-03-10 07:04:07.544968213 +0000 UTC m=+0.008905480 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92210]: 2026-03-10 07:04:07.713762589 +0000 UTC m=+0.177699856 container died f9895750bfcfa7f703b7fd57dcb32b8c36880a75f2067ea8fb4561870c00077d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3) 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 podman[92210]: 2026-03-10 07:04:07.733191927 +0000 UTC m=+0.197129194 container remove f9895750bfcfa7f703b7fd57dcb32b8c36880a75f2067ea8fb4561870c00077d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, 
org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.1.service: Deactivated successfully. 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 systemd[1]: Stopped Ceph osd.1 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.1.service: Consumed 3.370s CPU time. 2026-03-10T07:04:07.916 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:07 vm02 systemd[1]: Starting Ceph osd.1 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:08.001618193 +0000 UTC m=+0.014865345 container create 7728ffd32c6e6bf42bc8c0980c8ebb979a4c9dd9290632cecb06d4e285fc2c63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default) 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:08.038540193 +0000 UTC m=+0.051787355 container init 7728ffd32c6e6bf42bc8c0980c8ebb979a4c9dd9290632cecb06d4e285fc2c63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2) 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:08.041183109 +0000 UTC m=+0.054430272 container start 7728ffd32c6e6bf42bc8c0980c8ebb979a4c9dd9290632cecb06d4e285fc2c63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, 
FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:08.04821879 +0000 UTC m=+0.061465952 container attach 7728ffd32c6e6bf42bc8c0980c8ebb979a4c9dd9290632cecb06d4e285fc2c63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:07.995689085 +0000 UTC m=+0.008936247 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.335 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:08 vm05 ceph-mon[76604]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:08 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:08 vm05 ceph-mon[76604]: osdmap e92: 8 total, 7 up, 8 in 2026-03-10T07:04:08.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:08 vm02 ceph-mon[86149]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:08 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:08 vm02 ceph-mon[86149]: osdmap e92: 8 total, 7 up, 8 in 2026-03-10T07:04:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:08 vm02 ceph-mon[87980]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 
GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:08 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:08.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:08 vm02 ceph-mon[87980]: osdmap e92: 8 total, 7 up, 8 in 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8f45b394-d04a-4de0-983f-4c5e107b01f6/osd-block-eb30f5d3-2080-416b-b174-8350b4a9f19d --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-10T07:04:08.835 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8f45b394-d04a-4de0-983f-4c5e107b01f6/osd-block-eb30f5d3-2080-416b-b174-8350b4a9f19d --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/ln -snf /dev/ceph-8f45b394-d04a-4de0-983f-4c5e107b01f6/osd-block-eb30f5d3-2080-416b-b174-8350b4a9f19d /var/lib/ceph/osd/ceph-1/block 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/ln -snf /dev/ceph-8f45b394-d04a-4de0-983f-4c5e107b01f6/osd-block-eb30f5d3-2080-416b-b174-8350b4a9f19d /var/lib/ceph/osd/ceph-1/block 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate[92321]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 bash[92310]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:08.901505557 +0000 UTC m=+0.914752719 container died 7728ffd32c6e6bf42bc8c0980c8ebb979a4c9dd9290632cecb06d4e285fc2c63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:08 vm02 podman[92310]: 2026-03-10 07:04:08.926235957 +0000 UTC m=+0.939483109 container remove 7728ffd32c6e6bf42bc8c0980c8ebb979a4c9dd9290632cecb06d4e285fc2c63 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-activate, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:04:09.338 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 podman[92558]: 2026-03-10 07:04:09.007390681 +0000 UTC m=+0.014950835 container create 6a939dbe6adcad61430e0ca7862a1f559c5e5089c646ad8ee12a83ef98643fe5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default) 2026-03-10T07:04:09.339 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 podman[92558]: 2026-03-10 07:04:09.042980677 +0000 UTC m=+0.050540831 container init 6a939dbe6adcad61430e0ca7862a1f559c5e5089c646ad8ee12a83ef98643fe5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:09.339 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 podman[92558]: 2026-03-10 07:04:09.048177204 +0000 UTC m=+0.055737358 container start 6a939dbe6adcad61430e0ca7862a1f559c5e5089c646ad8ee12a83ef98643fe5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:04:09.339 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 bash[92558]: 6a939dbe6adcad61430e0ca7862a1f559c5e5089c646ad8ee12a83ef98643fe5 2026-03-10T07:04:09.339 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 podman[92558]: 2026-03-10 07:04:09.001277508 +0000 UTC m=+0.008837672 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:09.339 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 systemd[1]: Started Ceph osd.1 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
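The journal entries above capture the sequence cephadm runs inside the short-lived osd-1-activate container before systemd starts the long-running osd.1 unit: a raw-mode activation attempt finds nothing to activate, and ceph-volume then falls back to LVM activation (prime the OSD directory from the bluestore label, relink the block device, fix ownership). A minimal sketch of that same sequence, reconstructed from the commands logged above, is shown below; the LV path, OSD id and fsid are specific to this run, and the final ceph-volume invocation is only a rough equivalent of what the container performs, not a command taken from this log.

    # Sketch only: approximates the osd.1 activation steps logged above.
    OSD_ID=1
    OSD_DIR=/var/lib/ceph/osd/ceph-${OSD_ID}
    LV=/dev/ceph-8f45b394-d04a-4de0-983f-4c5e107b01f6/osd-block-eb30f5d3-2080-416b-b174-8350b4a9f19d

    ceph-authtool --gen-print-key                       # throwaway key generation, logged twice above
    chown -R ceph:ceph ${OSD_DIR}
    ceph-bluestore-tool --cluster=ceph prime-osd-dir \
        --dev ${LV} --path ${OSD_DIR} --no-mon-config   # rebuild the OSD dir from the bluestore label
    ln -snf ${LV} ${OSD_DIR}/block                       # point the block symlink at the LV
    chown -h ceph:ceph ${OSD_DIR}/block
    chown -R ceph:ceph /dev/dm-1
    chown -R ceph:ceph ${OSD_DIR}
    # Roughly equivalent single command (assumption, not from this log):
    #   ceph-volume lvm activate ${OSD_ID} eb30f5d3-2080-416b-b174-8350b4a9f19d --no-systemd

This also explains why the earlier "Failed to activate via raw: did not find any matching OSD to activate" message is followed by "ceph-volume lvm activate successful" rather than by an error: raw activation is tried first and the LVM path is the one that applies to these OSDs.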
2026-03-10T07:04:09.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:09 vm02 ceph-mon[86149]: osdmap e93: 8 total, 7 up, 8 in 2026-03-10T07:04:09.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:09 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:09.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:09 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:09.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:09 vm02 ceph-mon[87980]: osdmap e93: 8 total, 7 up, 8 in 2026-03-10T07:04:09.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:09 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:09.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:09 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:09.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:09 vm05 ceph-mon[76604]: osdmap e93: 8 total, 7 up, 8 in 2026-03-10T07:04:09.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:09 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:09.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:09 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:10.052 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:09 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:04:09.877+0000 7f34a0825740 -1 Falling back to public interface 2026-03-10T07:04:10.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:10 vm02 ceph-mon[87980]: pgmap v32: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:10.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:10.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:10 vm02 ceph-mon[86149]: pgmap v32: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:11.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:10 vm05 ceph-mon[76604]: pgmap v32: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:11.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:11.084 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:10 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:04:10.732+0000 7f34a0825740 -1 osd.1 0 read_superblock omap replica is missing. 2026-03-10T07:04:11.084 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:10 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:04:10.776+0000 7f34a0825740 -1 osd.1 91 log_to_monitors true 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[87980]: from='osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[87980]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:11 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:04:11] "GET /metrics HTTP/1.1" 200 37960 "" "Prometheus/2.51.0" 2026-03-10T07:04:11.836 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:04:11 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:04:11.580+0000 7f34985d0640 -1 osd.1 91 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[86149]: from='osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[86149]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:11.836 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:11 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:11 vm05 ceph-mon[76604]: from='osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T07:04:12.003 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:11 vm05 ceph-mon[76604]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-10T07:04:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:11 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:11 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:11 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:11 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: pgmap v33: 161 pgs: 18 active+undersized, 11 stale+active+clean, 12 active+undersized+degraded, 120 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 45/630 objects degraded (7.143%) 2026-03-10T07:04:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T07:04:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: osdmap e94: 8 total, 7 up, 8 in 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 45/630 objects degraded (7.143%), 12 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: pgmap v33: 161 pgs: 18 active+undersized, 11 stale+active+clean, 12 active+undersized+degraded, 120 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 45/630 objects degraded (7.143%) 2026-03-10T07:04:12.835 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: osdmap e94: 8 total, 7 up, 8 in 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 45/630 objects degraded (7.143%), 12 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:12.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:12 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:04:12.596+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: pgmap v33: 161 pgs: 18 active+undersized, 11 stale+active+clean, 12 active+undersized+degraded, 120 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 45/630 objects degraded (7.143%) 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: osdmap e94: 8 total, 7 up, 8 in 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm02", 
"root=default"]}]: dispatch 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 45/630 objects degraded (7.143%), 12 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:13.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639] boot 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[87980]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639] boot 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T07:04:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:13 vm02 ceph-mon[86149]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T07:04:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: osd.1 [v2:192.168.123.102:6810/2390502639,v1:192.168.123.102:6811/2390502639] boot 2026-03-10T07:04:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: osdmap e95: 8 total, 8 up, 8 in 2026-03-10T07:04:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T07:04:14.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-10T07:04:14.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:13 vm05 ceph-mon[76604]: osdmap e96: 8 total, 8 up, 8 in 2026-03-10T07:04:14.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:14.146Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T07:04:14.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:15.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:14 vm05 ceph-mon[76604]: pgmap v36: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 84/630 objects degraded (13.333%) 2026-03-10T07:04:15.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:14 vm02 ceph-mon[87980]: pgmap v36: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 84/630 objects degraded (13.333%) 2026-03-10T07:04:15.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:14 vm02 ceph-mon[86149]: pgmap v36: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 84/630 objects degraded (13.333%) 2026-03-10T07:04:16.939 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:16 vm05 ceph-mon[76604]: pgmap v38: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 84/630 objects degraded (13.333%) 2026-03-10T07:04:16.950 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:16 vm02 ceph-mon[87980]: pgmap v38: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 84/630 objects degraded (13.333%) 2026-03-10T07:04:16.950 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:16 vm02 ceph-mon[86149]: pgmap v38: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 84/630 objects degraded (13.333%) 2026-03-10T07:04:17.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in 
less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:17.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:17 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:18.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:17 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:18.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:17 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:18 vm05 ceph-mon[76604]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:04:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:18 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 84/630 objects degraded (13.333%), 21 pgs degraded) 2026-03-10T07:04:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:18 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:04:19.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:18 vm02 ceph-mon[87980]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:04:19.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:18 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 84/630 objects degraded (13.333%), 21 pgs degraded) 2026-03-10T07:04:19.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:18 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:04:19.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:18 vm02 ceph-mon[86149]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:04:19.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:18 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 84/630 objects degraded (13.333%), 21 pgs degraded) 2026-03-10T07:04:19.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:18 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:04:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:20 vm05 ceph-mon[76604]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 683 B/s rd, 0 op/s 2026-03-10T07:04:21.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:20 vm02 ceph-mon[87980]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 683 B/s rd, 0 op/s 
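The repeated Prometheus "rule manager" warnings above all have the same shape: an alert expression joins on ceph_osd_metadata (or node_uname_info) with group_left, but during the upgrade two series exist for the same match group, differing only in their instance label (192.168.123.105:9283 vs ceph_cluster) and in whether a cluster label is present, presumably because the old and new exporter configurations overlap briefly. With duplicate right-hand series the many-to-many check fails and the rule cannot evaluate. As an illustration only, and not something the test runs, the ambiguity can be reproduced and sidestepped by collapsing the metadata to one series per daemon before joining; the Prometheus URL and port below are assumptions, not taken from this log, and the workaround assumes each OSD reports a single hostname.

    # Illustrative only; Prometheus endpoint is an assumption, not from this job.
    PROM=http://vm05.local:9095

    # Fails while both metadata series exist (same error as in the journal above):
    promtool query instant "$PROM" \
      '(rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata) * 60 > 1'

    # One way to make the join unambiguous: keep a single metadata series per ceph_daemon.
    promtool query instant "$PROM" \
      '(rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname)
          max by (ceph_daemon, hostname) (ceph_osd_metadata)) * 60 > 1'

The CephNodeDiskspaceWarning failure in the preceding record is the same pattern on node_uname_info, where one series carries the cluster="28bd35e6-..." label and one does not.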
2026-03-10T07:04:21.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:20 vm02 ceph-mon[86149]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 683 B/s rd, 0 op/s 2026-03-10T07:04:22.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:21 vm05 ceph-mon[76604]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:22.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:21 vm02 ceph-mon[87980]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:22.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:04:21] "GET /metrics HTTP/1.1" 200 37960 "" "Prometheus/2.51.0" 2026-03-10T07:04:22.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:21 vm02 ceph-mon[86149]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:24.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:24 vm05 ceph-mon[76604]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:24.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:24.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T07:04:24.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:24.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:24.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:24 vm02 ceph-mon[87980]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:24.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:24 vm02 ceph-mon[86149]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[87980]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 891 B/s rd, 0 op/s 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[86149]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 891 B/s rd, 0 op/s 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:26.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:26 vm05 ceph-mon[76604]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 891 B/s rd, 0 op/s 2026-03-10T07:04:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:26 vm05 ceph-mon[76604]: 
from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:27.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:27 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:27.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:27.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:27 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:27.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:27 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: Upgrade: osd.2 is safe to restart 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T07:04:28.087 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: Upgrade: osd.2 is safe to restart 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T07:04:28.353 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: Upgrade: osd.2 is safe to restart 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-10T07:04:28.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:29.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:28 vm02 systemd[1]: Stopping Ceph osd.2 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:04:29.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[62882]: 2026-03-10T07:04:28.665+0000 7fd5fe2f4700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:04:29.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[62882]: 2026-03-10T07:04:28.665+0000 7fd5fe2f4700 -1 osd.2 96 *** Got signal Terminated *** 2026-03-10T07:04:29.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[62882]: 2026-03-10T07:04:28.665+0000 7fd5fe2f4700 -1 osd.2 96 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:04:29.346 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:29 vm02 ceph-mon[87980]: Upgrade: Updating osd.2 2026-03-10T07:04:29.346 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:29 vm02 ceph-mon[87980]: Deploying daemon osd.2 on vm02 2026-03-10T07:04:29.346 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:29 vm02 ceph-mon[87980]: osd.2 marked itself down and dead 2026-03-10T07:04:29.346 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94081]: 2026-03-10 07:04:29.140349929 +0000 UTC m=+0.488025118 container died 6409c5be18aa63d5b0ee2696821485c479b08b827d6a5a37fc0176a6c1bceda4 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, vcs-type=git, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, name=centos-stream, GIT_CLEAN=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, RELEASE=HEAD, ceph=True, io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public) 2026-03-10T07:04:29.346 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94081]: 2026-03-10 07:04:29.162728547 +0000 UTC m=+0.510403736 container remove 6409c5be18aa63d5b0ee2696821485c479b08b827d6a5a37fc0176a6c1bceda4 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, maintainer=Guillaume Abrioux , architecture=x86_64, release=754, version=8, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=) 2026-03-10T07:04:29.346 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 bash[94081]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2 2026-03-10T07:04:29.346 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.301589295 +0000 UTC m=+0.018348225 container create bd7955c6e7fb15f515634ffe8fcc71e101e921c6a666fad147aa4fbc30566f38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223) 2026-03-10T07:04:29.346 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.34157067 +0000 UTC m=+0.058329601 container init bd7955c6e7fb15f515634ffe8fcc71e101e921c6a666fad147aa4fbc30566f38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default) 2026-03-10T07:04:29.346 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:29 vm02 ceph-mon[86149]: Upgrade: Updating osd.2 2026-03-10T07:04:29.346 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:29 vm02 ceph-mon[86149]: Deploying daemon osd.2 on vm02 2026-03-10T07:04:29.346 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:29 vm02 ceph-mon[86149]: osd.2 marked itself down and dead 2026-03-10T07:04:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:29 vm05 ceph-mon[76604]: Upgrade: Updating osd.2 2026-03-10T07:04:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:29 vm05 ceph-mon[76604]: Deploying daemon osd.2 on vm02 2026-03-10T07:04:29.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:29 vm05 ceph-mon[76604]: osd.2 marked itself down and dead 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.346233679 +0000 UTC m=+0.062992600 container start bd7955c6e7fb15f515634ffe8fcc71e101e921c6a666fad147aa4fbc30566f38 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.347833694 +0000 UTC m=+0.064592615 container attach bd7955c6e7fb15f515634ffe8fcc71e101e921c6a666fad147aa4fbc30566f38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.293802488 +0000 UTC m=+0.010561420 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.475411642 +0000 UTC m=+0.192170563 container died bd7955c6e7fb15f515634ffe8fcc71e101e921c6a666fad147aa4fbc30566f38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94148]: 2026-03-10 07:04:29.497265317 +0000 UTC m=+0.214024238 container remove bd7955c6e7fb15f515634ffe8fcc71e101e921c6a666fad147aa4fbc30566f38 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-deactivate, ceph=True, CEPH_REF=squid, org.label-schema.vendor=CentOS, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.2.service: Deactivated successfully. 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 systemd[1]: Stopped Ceph osd.2 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.2.service: Consumed 12.907s CPU time. 2026-03-10T07:04:29.689 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 systemd[1]: Starting Ceph osd.2 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94248]: 2026-03-10 07:04:29.780810023 +0000 UTC m=+0.015911984 container create 189a5b4d01dcdb23448ff58e80e02acb80c78091e65eaca07c92e8fa76082cfa (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3) 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94248]: 2026-03-10 07:04:29.818340319 +0000 UTC m=+0.053442270 container init 189a5b4d01dcdb23448ff58e80e02acb80c78091e65eaca07c92e8fa76082cfa (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94248]: 2026-03-10 07:04:29.821500624 +0000 UTC m=+0.056602585 container start 189a5b4d01dcdb23448ff58e80e02acb80c78091e65eaca07c92e8fa76082cfa 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2) 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94248]: 2026-03-10 07:04:29.826170965 +0000 UTC m=+0.061272926 container attach 189a5b4d01dcdb23448ff58e80e02acb80c78091e65eaca07c92e8fa76082cfa (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 podman[94248]: 2026-03-10 07:04:29.774301762 +0000 UTC m=+0.009403732 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 bash[94248]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.085 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:29 vm02 bash[94248]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.358 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:30 vm02 ceph-mon[87980]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:30.358 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:30 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:30.358 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:30 vm02 ceph-mon[87980]: osdmap e97: 8 total, 7 up, 8 in 2026-03-10T07:04:30.358 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:30 vm02 ceph-mon[86149]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:30.358 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:30 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:30.358 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:30 vm02 ceph-mon[86149]: osdmap e97: 8 total, 7 up, 8 in 2026-03-10T07:04:30.358 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:04:30.358 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.358 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:04:30.358 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:30 vm05 ceph-mon[76604]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:30.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:30 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:30.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:30 vm05 ceph-mon[76604]: osdmap e97: 8 total, 7 up, 8 in 2026-03-10T07:04:30.690 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.690 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:30.690 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T07:04:30.690 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T07:04:30.690 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b67b1e85-fa60-4fde-81b6-38997b015185/osd-block-dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-10T07:04:30.691 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b67b1e85-fa60-4fde-81b6-38997b015185/osd-block-dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-10T07:04:30.691 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/ln -snf /dev/ceph-b67b1e85-fa60-4fde-81b6-38997b015185/osd-block-dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3 /var/lib/ceph/osd/ceph-2/block 2026-03-10T07:04:30.691 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/ln -snf /dev/ceph-b67b1e85-fa60-4fde-81b6-38997b015185/osd-block-dd1d0f2f-d7ae-4bd8-8eb5-71d68bfb7ea3 /var/lib/ceph/osd/ceph-2/block 2026-03-10T07:04:31.033 
INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate[94259]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94248]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 podman[94248]: 2026-03-10 07:04:30.723230232 +0000 UTC m=+0.958332204 container died 189a5b4d01dcdb23448ff58e80e02acb80c78091e65eaca07c92e8fa76082cfa (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 podman[94248]: 2026-03-10 07:04:30.74253757 +0000 UTC m=+0.977639531 container remove 189a5b4d01dcdb23448ff58e80e02acb80c78091e65eaca07c92e8fa76082cfa (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 
07:04:30 vm02 podman[94496]: 2026-03-10 07:04:30.851216957 +0000 UTC m=+0.021292304 container create b0f12ddcd6b3b42d718532df48aa3ce40bb8ee654c51d57d5a71076128c55e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 podman[94496]: 2026-03-10 07:04:30.895602454 +0000 UTC m=+0.065677801 container init b0f12ddcd6b3b42d718532df48aa3ce40bb8ee654c51d57d5a71076128c55e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 podman[94496]: 2026-03-10 07:04:30.902539518 +0000 UTC m=+0.072614865 container start b0f12ddcd6b3b42d718532df48aa3ce40bb8ee654c51d57d5a71076128c55e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 bash[94496]: b0f12ddcd6b3b42d718532df48aa3ce40bb8ee654c51d57d5a71076128c55e06 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 podman[94496]: 2026-03-10 07:04:30.842805103 +0000 UTC m=+0.012880440 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:31.034 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:30 vm02 systemd[1]: Started Ceph osd.2 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
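The entries above trace cephadm's staggered upgrade of a single OSD: the mgr first asks the monitors whether osd.2 is safe to stop (osd ok-to-stop), the old 17.2.0 container receives SIGTERM and does a fast shutdown, one-shot osd-2-deactivate and osd-2-activate containers from the new squid image re-prime /var/lib/ceph/osd/ceph-2 (raw activation finds no matching OSD and falls back to ceph-volume lvm activate), and systemd then starts the replacement osd.2 unit. A minimal sketch of how the same step can be followed by hand from a mon host; these are all standard ceph commands, nothing here is part of the test itself, and the osd id is taken from this log:

    ceph orch upgrade status      # target image and whether the upgrade is still in progress
    ceph osd ok-to-stop 2         # the same gate cephadm checks before restarting osd.2
    ceph orch ps | grep '^osd'    # per-daemon version, image id and container id
    ceph versions                 # version histogram; collapses to a single entry when the upgrade completes
    ceph health detail            # brief OSD_DOWN / PG_DEGRADED warnings are expected while each OSD restarts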
2026-03-10T07:04:31.303 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:31 vm02 ceph-mon[87980]: osdmap e98: 8 total, 7 up, 8 in 2026-03-10T07:04:31.303 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:31 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:31.303 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:31 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:31.304 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:31 vm02 ceph-mon[86149]: osdmap e98: 8 total, 7 up, 8 in 2026-03-10T07:04:31.304 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:31 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:31.304 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:31 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:31.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:31 vm05 ceph-mon[76604]: osdmap e98: 8 total, 7 up, 8 in 2026-03-10T07:04:31.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:31 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:31.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:31 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:31.961 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:31 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:04:31] "GET /metrics HTTP/1.1" 200 37972 "" "Prometheus/2.51.0" 2026-03-10T07:04:31.961 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:31 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:04:31.731+0000 7fb8f61a8740 -1 Falling back to public interface 2026-03-10T07:04:32.266 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:32 vm02 ceph-mon[87980]: pgmap v48: 161 pgs: 29 active+undersized, 1 stale+active+clean, 12 active+undersized+degraded, 119 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 37/630 objects degraded (5.873%) 2026-03-10T07:04:32.267 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:32 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 37/630 objects degraded (5.873%), 12 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:32.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:32 vm02 ceph-mon[86149]: pgmap v48: 161 pgs: 29 active+undersized, 1 stale+active+clean, 12 active+undersized+degraded, 119 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 37/630 objects degraded (5.873%) 2026-03-10T07:04:32.267 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:32 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 37/630 objects degraded (5.873%), 12 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:32 vm05 ceph-mon[76604]: pgmap v48: 161 pgs: 29 active+undersized, 1 stale+active+clean, 12 active+undersized+degraded, 119 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 37/630 objects degraded (5.873%) 2026-03-10T07:04:32.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:32 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 37/630 objects degraded (5.873%), 12 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:32.584 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:32 vm02 
ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:04:32.346+0000 7fb8f61a8740 -1 osd.2 0 read_superblock omap replica is missing. 2026-03-10T07:04:32.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:32 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:04:32.366+0000 7fb8f61a8740 -1 osd.2 96 log_to_monitors true 2026-03-10T07:04:33.308 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.308 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.308 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[86149]: from='osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T07:04:33.308 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[86149]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T07:04:33.308 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.308 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[87980]: from='osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T07:04:33.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[87980]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T07:04:33.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:04:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:04:33.361+0000 7fb8edf53640 -1 osd.2 96 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:04:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:33 vm05 ceph-mon[76604]: from='osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": 
["2"]}]: dispatch 2026-03-10T07:04:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:33 vm05 ceph-mon[76604]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T07:04:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:33.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:04:33.971+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: pgmap v49: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 57/630 objects degraded (9.048%) 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: osdmap e99: 8 total, 7 up, 8 in 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 
192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:34.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: pgmap v49: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 57/630 objects degraded (9.048%) 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: osdmap e99: 8 total, 7 up, 8 in 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:34.835 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: pgmap v49: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 57/630 objects degraded (9.048%) 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: osdmap e99: 8 total, 7 up, 8 in 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:35.514 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:04:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:35 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:35 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T07:04:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:35 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:35 vm05 ceph-mon[76604]: osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084] boot 2026-03-10T07:04:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:35 vm05 ceph-mon[76604]: osdmap e100: 8 total, 8 up, 8 in 2026-03-10T07:04:35.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:35 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[86149]: osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084] boot 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[86149]: osdmap e100: 8 total, 8 up, 8 in 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[87980]: osd.2 [v2:192.168.123.102:6818/3975655084,v1:192.168.123.102:6819/3975655084] boot 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[87980]: osdmap e100: 8 total, 8 up, 8 in 2026-03-10T07:04:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:35 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (103s) 3s ago 6m 22.5M - 0.25.0 c8568f914cd2 520cbcc5ad98 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (93s) 58s ago 5m 42.2M - dad864ee21e9 34567dcb4b51 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (81s) 3s ago 5m 51.9M - 3.5 e1d6a67b021e a06caff18850 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (79s) 58s ago 7m 485M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (91s) 3s ago 8m 553M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (74s) 3s ago 8m 47.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (59s) 58s ago 7m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (65s) 3s ago 7m 41.2M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (106s) 3s ago 
6m 9407k - 1.7.0 72c9c2088986 0bc0b34c732a 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (99s) 58s ago 6m 8946k - 1.7.0 72c9c2088986 0129ed456f9d 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (48s) 3s ago 7m 67.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (26s) 3s ago 7m 45.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (5s) 3s ago 7m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (7m) 3s ago 7m 52.2M 4096M 17.2.0 e1d6a67b021e 5df7cfae139f 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (6m) 58s ago 6m 53.0M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (6m) 58s ago 6m 50.9M 4096M 17.2.0 e1d6a67b021e ef1a70593f89 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (6m) 58s ago 6m 53.6M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (6m) 58s ago 6m 52.2M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (80s) 58s ago 6m 40.4M - 2.51.0 1d3b7f56885b 2f14ff887bc7 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (5m) 3s ago 5m 94.0M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (5m) 58s ago 5m 93.8M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (5m) 3s ago 5m 92.0M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:04:35.939 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (5m) 58s ago 5m 93.7M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 
(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 9, 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:04:36.187 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:04:36.397 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [ 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "mgr", 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "mon" 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: ], 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "8/23 daemons upgraded", 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false 2026-03-10T07:04:36.398 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[86149]: pgmap v52: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 57/630 objects degraded (9.048%) 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[86149]: osdmap e101: 8 total, 8 up, 8 in 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[86149]: from='client.34200 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3095033769' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[87980]: pgmap v52: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 57/630 objects degraded (9.048%) 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[87980]: osdmap e101: 8 total, 8 up, 8 in 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[87980]: from='client.34200 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:36.640 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:36 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/3095033769' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN Degraded data redundancy: 57/630 objects degraded (9.048%), 15 pgs degraded 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 57/630 objects degraded (9.048%), 15 pgs degraded 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 2.f is active+undersized+degraded, acting [4,0] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 2.10 is active+undersized+degraded, acting [1,0] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 2.17 is active+undersized+degraded, acting [6,5] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.0 is active+undersized+degraded, acting [1,6] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.4 is active+undersized+degraded, acting [1,5] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.5 is active+undersized+degraded, acting [5,3] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.9 is active+undersized+degraded, acting [4,7] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.13 is active+undersized+degraded, acting [7,4] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.1a is active+undersized+degraded, acting [4,1] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.1e is active+undersized+degraded, acting [3,6] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.1f is active+undersized+degraded, acting [0,5] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 4.d is active+undersized+degraded, acting [4,1] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 4.1f is active+undersized+degraded, acting [6,5] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 5.a is active+undersized+degraded, acting [4,3] 2026-03-10T07:04:36.640 INFO:teuthology.orchestra.run.vm02.stdout: pg 5.1c is active+undersized+degraded, acting [4,3] 2026-03-10T07:04:36.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:36 vm05 ceph-mon[76604]: pgmap v52: 161 pgs: 32 active+undersized, 15 active+undersized+degraded, 114 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 57/630 objects degraded (9.048%) 2026-03-10T07:04:36.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:36 vm05 ceph-mon[76604]: osdmap e101: 8 total, 8 up, 8 in 2026-03-10T07:04:36.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:36 vm05 ceph-mon[76604]: from='client.34200 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:36.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:36 vm05 ceph-mon[76604]: from='client.? 
192.168.123.102:0/3095033769' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:37.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:37 vm05 ceph-mon[76604]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:37 vm05 ceph-mon[76604]: from='client.44149 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:37 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:37.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:37 vm05 ceph-mon[76604]: from='client.34224 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:37 vm05 ceph-mon[76604]: from='client.? 
192.168.123.102:0/631171292' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:04:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[87980]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[87980]: from='client.44149 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[87980]: from='client.34224 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/631171292' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:04:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[86149]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[86149]: from='client.44149 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[86149]: from='client.34224 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:04:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:37 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/631171292' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:04:38.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:38 vm05 ceph-mon[76604]: pgmap v54: 161 pgs: 5 peering, 10 active+undersized, 2 active+undersized+degraded, 144 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 2/630 objects degraded (0.317%) 2026-03-10T07:04:38.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:38 vm05 ceph-mon[76604]: Health check update: Degraded data redundancy: 2/630 objects degraded (0.317%), 2 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:38 vm02 ceph-mon[87980]: pgmap v54: 161 pgs: 5 peering, 10 active+undersized, 2 active+undersized+degraded, 144 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 2/630 objects degraded (0.317%) 2026-03-10T07:04:38.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:38 vm02 ceph-mon[87980]: Health check update: Degraded data redundancy: 2/630 objects degraded (0.317%), 2 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:38.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:38 vm02 ceph-mon[86149]: pgmap v54: 161 pgs: 5 peering, 10 active+undersized, 2 active+undersized+degraded, 144 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 2/630 objects degraded (0.317%) 2026-03-10T07:04:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:38 vm02 ceph-mon[86149]: Health check update: Degraded data redundancy: 2/630 objects degraded (0.317%), 2 pgs degraded (PG_DEGRADED) 2026-03-10T07:04:39.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:39 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 2/630 objects degraded (0.317%), 2 pgs degraded) 2026-03-10T07:04:39.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:39 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:04:39.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:39 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 2/630 objects degraded (0.317%), 2 pgs degraded) 2026-03-10T07:04:39.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:39 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:04:39.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:39 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 2/630 objects degraded (0.317%), 2 pgs degraded) 2026-03-10T07:04:39.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:39 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:04:40.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:40 vm05 ceph-mon[76604]: pgmap v55: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:40.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:40.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:40 vm02 ceph-mon[87980]: pgmap v55: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:40.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:40.835 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:40 vm02 ceph-mon[86149]: pgmap v55: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:40.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:41.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:04:41] "GET /metrics HTTP/1.1" 200 37985 "" "Prometheus/2.51.0" 2026-03-10T07:04:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:42 vm02 ceph-mon[87980]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:04:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:42 vm02 ceph-mon[86149]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:04:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:42 vm05 ceph-mon[76604]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:04:44.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.3\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:44 vm05 ceph-mon[76604]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:04:45.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:44 vm02 ceph-mon[87980]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:04:45.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:44 vm02 ceph-mon[86149]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:04:46.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:45 vm05 ceph-mon[76604]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:46.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:45 vm02 ceph-mon[87980]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:46.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:45 vm02 ceph-mon[86149]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:04:46.844 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:46 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:46.844 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:46 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:47.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:46 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:47.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:48.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:47 vm02 ceph-mon[87980]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:04:48.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:47 vm02 ceph-mon[86149]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:04:48.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:47 vm05 ceph-mon[76604]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:04:49.292 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:49 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:49.292 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:49 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:49.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:49 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:50.085 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:50 vm02 systemd[1]: Stopping Ceph osd.3 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: Upgrade: osd.3 is safe to restart 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: Upgrade: Updating osd.3 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: Deploying daemon osd.3 on vm02 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[87980]: osd.3 marked itself down and dead 2026-03-10T07:04:50.585 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:50 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[65679]: 2026-03-10T07:04:50.119+0000 7f530f0cd700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:04:50.585 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:50 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[65679]: 2026-03-10T07:04:50.119+0000 7f530f0cd700 -1 osd.3 101 *** Got signal Terminated *** 2026-03-10T07:04:50.585 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:50 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[65679]: 2026-03-10T07:04:50.119+0000 7f530f0cd700 -1 osd.3 101 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: Upgrade: osd.3 is safe to restart 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: Upgrade: Updating osd.3 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: Deploying daemon osd.3 on vm02 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:50 vm02 ceph-mon[86149]: osd.3 marked itself down and dead 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: Upgrade: osd.3 is safe to restart 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: Upgrade: Updating osd.3 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: Deploying daemon osd.3 on vm02 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:50 vm05 ceph-mon[76604]: osd.3 marked itself down and dead 2026-03-10T07:04:51.410 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:51 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:51.410 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:51 vm02 ceph-mon[87980]: osdmap e102: 8 total, 7 up, 8 in 
2026-03-10T07:04:51.410 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:51 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:51.410 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:51 vm02 ceph-mon[86149]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96180]: 2026-03-10 07:04:51.14556926 +0000 UTC m=+1.039294212 container died 5df7cfae139f2029d3c33e1f4e17becba8600213f470e2068f3fad89324fdd2c (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, release=754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, ceph=True, GIT_CLEAN=True, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , name=centos-stream, vendor=Red Hat, Inc., distribution-scope=public) 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96180]: 2026-03-10 07:04:51.173796863 +0000 UTC m=+1.067521815 container remove 5df7cfae139f2029d3c33e1f4e17becba8600213f470e2068f3fad89324fdd2c (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, build-date=2022-05-03T08:36:31.336870, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.component=centos-stream-container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, vcs-type=git, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, io.openshift.tags=base centos centos-stream, RELEASE=HEAD, version=8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD) 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 bash[96180]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.318598681 +0000 UTC m=+0.016546460 container create b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.373312089 +0000 UTC m=+0.071259878 container init b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.377116079 +0000 UTC m=+0.075063868 container start b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, 
CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:04:51.411 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.380526161 +0000 UTC m=+0.078473941 container attach b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:04:51.716 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:04:51] "GET /metrics HTTP/1.1" 200 37985 "" "Prometheus/2.51.0" 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.312299461 +0000 UTC m=+0.010247250 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 conmon[96257]: conmon b7152fa931ecc8cd0986 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775.scope/container/memory.events 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.514087871 +0000 UTC m=+0.212035660 container died b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96246]: 2026-03-10 07:04:51.529683101 +0000 UTC m=+0.227630880 container remove b7152fa931ecc8cd09863003a6b28ca195b6c8a100af395f0af27d2d2364c775 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service: Deactivated successfully. 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service: Unit process 96257 (conmon) remains running after unit stopped. 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service: Unit process 96265 (podman) remains running after unit stopped. 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 systemd[1]: Stopped Ceph osd.3 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service: Consumed 3.718s CPU time, 141.8M memory peak. 2026-03-10T07:04:51.716 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 systemd[1]: Starting Ceph osd.3 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
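At this point systemd has stopped the quincy-image osd.3 container and is starting the replacement unit; the ceph-volume activate output that follows runs from the new squid image. A hedged sketch of how the redeployed daemon could be inspected on vm02 (assumed commands, not issued by this test):

    # list cephadm-managed daemons on the host and pick out osd.3
    cephadm ls | jq '.[] | select(.name == "osd.3") | {name, version, container_image_name}'
    # follow the systemd unit named in the log above
    journalctl -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service -n 50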
2026-03-10T07:04:51.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:51 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:04:51.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:51 vm05 ceph-mon[76604]: osdmap e102: 8 total, 7 up, 8 in 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96349]: 2026-03-10 07:04:51.81089017 +0000 UTC m=+0.016237002 container create 414f0ac50a5719bc22442f28e47959a7dbf688d9a9785e7d6148df0262cc7546 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96349]: 2026-03-10 07:04:51.84682518 +0000 UTC m=+0.052172002 container init 414f0ac50a5719bc22442f28e47959a7dbf688d9a9785e7d6148df0262cc7546 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96349]: 2026-03-10 07:04:51.852772171 +0000 UTC m=+0.058118993 container start 414f0ac50a5719bc22442f28e47959a7dbf688d9a9785e7d6148df0262cc7546 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96349]: 2026-03-10 07:04:51.85361597 +0000 UTC m=+0.058962802 container attach 414f0ac50a5719bc22442f28e47959a7dbf688d9a9785e7d6148df0262cc7546 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 podman[96349]: 2026-03-10 07:04:51.804405373 +0000 UTC m=+0.009752205 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 bash[96349]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.086 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:51 vm02 bash[96349]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.691 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:52 vm02 ceph-mon[86149]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:52.691 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:52 vm02 ceph-mon[86149]: osdmap e103: 8 total, 7 up, 8 in 2026-03-10T07:04:52.691 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:52 vm02 ceph-mon[87980]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:52.691 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:52 vm02 ceph-mon[87980]: osdmap e103: 8 total, 7 up, 8 in 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: 
/usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-359d7f40-5ff9-4120-9e7f-5036306497bf/osd-block-3a40e513-6ad9-43f1-ba74-0b37d785fad9 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-359d7f40-5ff9-4120-9e7f-5036306497bf/osd-block-3a40e513-6ad9-43f1-ba74-0b37d785fad9 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-10T07:04:52.691 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/ln -snf /dev/ceph-359d7f40-5ff9-4120-9e7f-5036306497bf/osd-block-3a40e513-6ad9-43f1-ba74-0b37d785fad9 /var/lib/ceph/osd/ceph-3/block 2026-03-10T07:04:52.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:52 vm05 ceph-mon[76604]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:04:52.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:52 vm05 ceph-mon[76604]: osdmap e103: 8 total, 7 up, 8 in 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/ln -snf /dev/ceph-359d7f40-5ff9-4120-9e7f-5036306497bf/osd-block-3a40e513-6ad9-43f1-ba74-0b37d785fad9 /var/lib/ceph/osd/ceph-3/block 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate[96361]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-10T07:04:52.946 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 bash[96349]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-10T07:04:52.946 
INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:52 vm02 podman[96349]: 2026-03-10 07:04:52.717985011 +0000 UTC m=+0.923331843 container died 414f0ac50a5719bc22442f28e47959a7dbf688d9a9785e7d6148df0262cc7546 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:04:53.279 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 podman[96349]: 2026-03-10 07:04:53.186642574 +0000 UTC m=+1.391989406 container remove 414f0ac50a5719bc22442f28e47959a7dbf688d9a9785e7d6148df0262cc7546 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-activate, CEPH_REF=squid, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 2026-03-10T07:04:53.570 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:53 vm02 ceph-mon[86149]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T07:04:53.570 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:53 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:53.570 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:53 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:53.570 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:53 vm02 ceph-mon[87980]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T07:04:53.570 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:53 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:53.570 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:53 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:53.570 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 podman[96599]: 2026-03-10 07:04:53.279585797 +0000 UTC m=+0.014440800 container create 396c050a0b4f77668a63b8750966d7d823a865daa29f8619379186d29f56b0a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, 
org.label-schema.license=GPLv2, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:04:53.570 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 podman[96599]: 2026-03-10 07:04:53.313405537 +0000 UTC m=+0.048260551 container init 396c050a0b4f77668a63b8750966d7d823a865daa29f8619379186d29f56b0a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:04:53.570 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 podman[96599]: 2026-03-10 07:04:53.317859334 +0000 UTC m=+0.052714328 container start 396c050a0b4f77668a63b8750966d7d823a865daa29f8619379186d29f56b0a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:04:53.570 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 bash[96599]: 396c050a0b4f77668a63b8750966d7d823a865daa29f8619379186d29f56b0a3 2026-03-10T07:04:53.570 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 podman[96599]: 2026-03-10 07:04:53.273674573 +0000 UTC m=+0.008529587 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:04:53.570 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:53 vm02 systemd[1]: Started Ceph osd.3 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:04:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:53 vm05 ceph-mon[76604]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T07:04:54.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:53 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:54.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:53 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:54.336 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:54 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:04:54.157+0000 7f7643dec740 -1 Falling back to public interface 2026-03-10T07:04:54.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.3\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:54.667 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:54 vm02 ceph-mon[87980]: pgmap v64: 161 pgs: 14 peering, 21 stale+active+clean, 126 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:04:54.667 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:54 vm02 ceph-mon[86149]: pgmap v64: 161 pgs: 14 peering, 21 stale+active+clean, 126 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:04:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:54 vm05 ceph-mon[76604]: pgmap v64: 161 pgs: 14 peering, 21 stale+active+clean, 126 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:04:55.661 
INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:55 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:04:55.401+0000 7f7643dec740 -1 osd.3 0 read_superblock omap replica is missing. 2026-03-10T07:04:55.661 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:55 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:04:55.420+0000 7f7643dec740 -1 osd.3 101 log_to_monitors true 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: pgmap v65: 161 pgs: 5 active+undersized, 14 peering, 20 stale+active+clean, 1 active+undersized+degraded, 121 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 1/630 objects degraded (0.159%) 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:55 vm05 ceph-mon[76604]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: pgmap v65: 161 pgs: 5 active+undersized, 14 peering, 20 stale+active+clean, 1 active+undersized+degraded, 121 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 1/630 objects degraded (0.159%) 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[87980]: from='osd.3 
[v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: pgmap v65: 161 pgs: 5 active+undersized, 14 peering, 20 stale+active+clean, 1 active+undersized+degraded, 121 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 1/630 objects degraded (0.159%) 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:55 vm02 ceph-mon[86149]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[87980]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[87980]: osdmap e104: 8 total, 7 up, 8 in 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[87980]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[86149]: from='osd.3 
[v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[86149]: osdmap e104: 8 total, 7 up, 8 in 2026-03-10T07:04:57.047 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:56 vm02 ceph-mon[86149]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:57.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:56 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T07:04:57.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:56 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:04:57.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:56 vm05 ceph-mon[76604]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T07:04:57.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:56 vm05 ceph-mon[76604]: osdmap e104: 8 total, 7 up, 8 in 2026-03-10T07:04:57.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:56 vm05 ceph-mon[76604]: from='osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T07:04:57.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:04:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:04:56.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:04:57.334 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:04:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 
2026-03-10T07:04:57.046+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[87980]: pgmap v67: 161 pgs: 40 active+undersized, 26 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 81/630 objects degraded (12.857%) 2026-03-10T07:04:58.085 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:04:57 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:04:57.806+0000 7f763b396640 -1 osd.3 101 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:04:58.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:57 vm02 ceph-mon[86149]: pgmap v67: 161 pgs: 40 active+undersized, 26 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 81/630 objects degraded (12.857%) 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:04:58.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:04:58.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:04:58.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:04:58.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-10T07:04:58.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:57 vm05 ceph-mon[76604]: pgmap v67: 161 pgs: 40 active+undersized, 26 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 81/630 objects degraded (12.857%) 2026-03-10T07:04:59.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:58 vm05 ceph-mon[76604]: OSD bench result of 31368.029071 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. 
Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:04:59.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:58 vm05 ceph-mon[76604]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T07:04:59.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:58 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:59.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:58 vm05 ceph-mon[76604]: osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204] boot 2026-03-10T07:04:59.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:58 vm05 ceph-mon[76604]: osdmap e105: 8 total, 8 up, 8 in 2026-03-10T07:04:59.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:58 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[87980]: OSD bench result of 31368.029071 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[87980]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[87980]: osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204] boot 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[87980]: osdmap e105: 8 total, 8 up, 8 in 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[86149]: OSD bench result of 31368.029071 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[86149]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[86149]: osd.3 [v2:192.168.123.102:6826/3160240204,v1:192.168.123.102:6827/3160240204] boot 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[86149]: osdmap e105: 8 total, 8 up, 8 in 2026-03-10T07:04:59.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:58 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T07:05:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:59 vm05 ceph-mon[76604]: osdmap e106: 8 total, 8 up, 8 in 2026-03-10T07:05:00.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:04:59 vm05 ceph-mon[76604]: pgmap v70: 161 pgs: 5 peering, 36 active+undersized, 25 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 80/630 objects degraded (12.698%) 2026-03-10T07:05:00.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:59 vm02 ceph-mon[87980]: osdmap e106: 8 total, 8 up, 8 in 2026-03-10T07:05:00.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:04:59 vm02 ceph-mon[87980]: pgmap v70: 161 pgs: 5 peering, 36 active+undersized, 25 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 80/630 objects degraded (12.698%) 2026-03-10T07:05:00.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:59 vm02 ceph-mon[86149]: osdmap e106: 8 total, 8 up, 8 in 2026-03-10T07:05:00.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:04:59 vm02 ceph-mon[86149]: pgmap v70: 161 pgs: 5 peering, 36 active+undersized, 25 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 80/630 objects degraded (12.698%) 2026-03-10T07:05:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:01 vm05 ceph-mon[76604]: Health check update: Degraded data redundancy: 46/630 objects degraded (7.302%), 15 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:01.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:01 vm02 ceph-mon[87980]: Health check update: Degraded data redundancy: 46/630 objects degraded (7.302%), 15 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:01 vm02 ceph-mon[86149]: Health check update: Degraded data redundancy: 46/630 objects degraded (7.302%), 15 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:02.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:01 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:05:01] "GET /metrics HTTP/1.1" 200 38054 "" "Prometheus/2.51.0" 2026-03-10T07:05:02.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:02 vm05 ceph-mon[76604]: pgmap v71: 161 pgs: 5 peering, 20 active+undersized, 15 active+undersized+degraded, 121 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 46/630 objects degraded (7.302%) 2026-03-10T07:05:02.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:02 vm02 ceph-mon[87980]: pgmap v71: 161 pgs: 5 peering, 20 active+undersized, 15 active+undersized+degraded, 121 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 
GiB avail; 46/630 objects degraded (7.302%) 2026-03-10T07:05:02.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:02 vm02 ceph-mon[86149]: pgmap v71: 161 pgs: 5 peering, 20 active+undersized, 15 active+undersized+degraded, 121 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 46/630 objects degraded (7.302%) 2026-03-10T07:05:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:03 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 46/630 objects degraded (7.302%), 15 pgs degraded) 2026-03-10T07:05:03.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:03 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:05:03.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:03 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 46/630 objects degraded (7.302%), 15 pgs degraded) 2026-03-10T07:05:03.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:03 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:05:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:03 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 46/630 objects degraded (7.302%), 15 pgs degraded) 2026-03-10T07:05:03.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:03 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:05:04.416 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:04.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.3\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T07:05:04.417 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.3\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T07:05:04.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:04 vm05 ceph-mon[76604]: pgmap v72: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail
2026-03-10T07:05:04.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:04 vm02 ceph-mon[87980]: pgmap v72: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail
2026-03-10T07:05:04.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:04 vm02 ceph-mon[86149]: pgmap v72: 161 pgs: 5 peering, 156 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail
2026-03-10T07:05:06.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:06 vm05 ceph-mon[76604]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail
2026-03-10T07:05:06.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:06 vm02 ceph-mon[86149]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail
2026-03-10T07:05:06.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:06 vm02 ceph-mon[87980]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail
2026-03-10T07:05:06.853 INFO:teuthology.orchestra.run.vm02.stdout:true
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (2m) 12s ago 6m 22.5M - 0.25.0 c8568f914cd2 520cbcc5ad98
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (2m) 89s ago 6m 42.2M - dad864ee21e9 34567dcb4b51
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (113s) 12s ago 6m 52.0M - 3.5 e1d6a67b021e a06caff18850
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (110s) 89s ago 8m 485M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (2m) 12s ago 8m 554M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (106s) 12s ago 8m 48.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (91s) 89s ago 8m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (96s) 12s ago 8m 42.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (2m) 12s ago 6m 9424k - 1.7.0 72c9c2088986 0bc0b34c732a
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (2m) 89s ago 6m 8946k - 1.7.0 72c9c2088986 0129ed456f9d
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (79s) 12s ago 7m 69.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (58s) 12s ago 7m 47.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (36s) 12s ago 7m 44.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (13s) 12s ago 7m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 396c050a0b4f
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (7m) 89s ago 7m 53.0M 4096M 17.2.0 e1d6a67b021e dcf9e1b40e11
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (7m) 89s ago 7m 50.9M 4096M 17.2.0 e1d6a67b021e ef1a70593f89
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (7m) 89s ago 7m 53.6M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (6m) 89s ago 6m 52.2M 4096M 17.2.0 e1d6a67b021e 83d454094982
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (112s) 89s ago 6m 40.4M - 2.51.0 1d3b7f56885b 2f14ff887bc7
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (6m) 12s ago 6m 94.2M - 17.2.0 e1d6a67b021e ef903c439808
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (6m) 89s ago 6m 93.8M - 17.2.0 e1d6a67b021e acd35f4810d9
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (6m) 12s ago 6m 92.1M - 17.2.0 e1d6a67b021e 6c68381e5378
2026-03-10T07:05:07.247 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (6m) 89s ago 6m 93.7M - 17.2.0 e1d6a67b021e 62a81876b05e
2026-03-10T07:05:07.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "mon": {
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": {
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "osd": {
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4,
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": {
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "overall": {
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8,
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 9
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T07:05:07.492 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:05:07.587 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:07 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T07:05:07.587 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:07 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true,
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "mgr",
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "mon"
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: ],
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "9/23 daemons upgraded",
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading osd daemons",
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false
2026-03-10T07:05:07.695 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:05:07.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:07 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-10T07:05:07.934 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T07:05:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:08 vm05 ceph-mon[76604]: from='client.34242 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:05:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:08 vm05 ceph-mon[76604]: from='client.34248 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:05:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:08 vm05 ceph-mon[76604]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 451 B/s rd, 0 op/s
2026-03-10T07:05:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:08 vm05 ceph-mon[76604]: from='client.34254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:05:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:08 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/2697368149' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T07:05:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:08 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/3139292613' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T07:05:08.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[87980]: from='client.34242 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:05:08.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[87980]: from='client.34248 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:05:08.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[87980]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 451 B/s rd, 0 op/s
2026-03-10T07:05:08.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[87980]: from='client.34254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:05:08.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/2697368149' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:08.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3139292613' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:05:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[86149]: from='client.34242 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[86149]: from='client.34248 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[86149]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 451 B/s rd, 0 op/s 2026-03-10T07:05:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[86149]: from='client.34254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/2697368149' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:08.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:08 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3139292613' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:05:09.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:09 vm05 ceph-mon[76604]: from='client.44185 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:09.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:09 vm02 ceph-mon[87980]: from='client.44185 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:09.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:09 vm02 ceph-mon[86149]: from='client.44185 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:10 vm05 ceph-mon[76604]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 406 B/s rd, 0 op/s 2026-03-10T07:05:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:10.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[87980]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 406 B/s rd, 0 op/s 2026-03-10T07:05:10.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:10.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[87980]: from='mgr.24905 
192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:10.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:10.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[86149]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 406 B/s rd, 0 op/s 2026-03-10T07:05:10.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:10.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:10.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:12.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:11 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:05:11] "GET /metrics HTTP/1.1" 200 38064 "" "Prometheus/2.51.0" 2026-03-10T07:05:12.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:12 vm05 ceph-mon[76604]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-10T07:05:12.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:05:12.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:12.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T07:05:12.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:12 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[87980]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-10T07:05:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:05:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T07:05:12.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[86149]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 
341 B/s rd, 0 op/s 2026-03-10T07:05:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:05:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-10T07:05:12.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:12 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:13.098 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 systemd[1]: Stopping Ceph osd.4 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:05:13.496 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[52009]: 2026-03-10T07:05:13.176+0000 7f3d99b20700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:05:13.496 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[52009]: 2026-03-10T07:05:13.176+0000 7f3d99b20700 -1 osd.4 106 *** Got signal Terminated *** 2026-03-10T07:05:13.496 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[52009]: 2026-03-10T07:05:13.176+0000 7f3d99b20700 -1 osd.4 106 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:05:13.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:13 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:05:13.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:13 vm05 ceph-mon[76604]: Upgrade: osd.4 is safe to restart 2026-03-10T07:05:13.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:13 vm05 ceph-mon[76604]: Upgrade: Updating osd.4 2026-03-10T07:05:13.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:13 vm05 ceph-mon[76604]: Deploying daemon osd.4 on vm05 2026-03-10T07:05:13.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:13 vm05 ceph-mon[76604]: osd.4 marked itself down and dead 2026-03-10T07:05:13.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78575]: 2026-03-10 07:05:13.561786986 +0000 UTC m=+0.403387010 container died dcf9e1b40e11af7b0147c09f3526dd212cfb0877933f9abfcbfeef57b04a4da0 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-type=git, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, RELEASE=HEAD, com.redhat.component=centos-stream-container, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, GIT_CLEAN=True, io.openshift.expose-services=, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.tags=base centos centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-10T07:05:13.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78575]: 2026-03-10 07:05:13.587743209 +0000 UTC m=+0.429343233 container remove dcf9e1b40e11af7b0147c09f3526dd212cfb0877933f9abfcbfeef57b04a4da0 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, name=centos-stream, distribution-scope=public, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., version=8, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, build-date=2022-05-03T08:36:31.336870, GIT_CLEAN=True, RELEASE=HEAD, vcs-type=git, architecture=x86_64, com.redhat.component=centos-stream-container, GIT_REPO=https://github.com/ceph/ceph-container.git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, release=754, ceph=True) 2026-03-10T07:05:13.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 bash[78575]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4 2026-03-10T07:05:13.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.732393326 +0000 UTC m=+0.016274035 container create 5a18de31b327d0d726122b96f31bd2e17f59d6b7097d5067228beba8297fa21d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default) 2026-03-10T07:05:13.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:05:13.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[87980]: Upgrade: osd.4 is safe to restart 2026-03-10T07:05:13.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[87980]: Upgrade: Updating osd.4 2026-03-10T07:05:13.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[87980]: Deploying daemon osd.4 on vm05 2026-03-10T07:05:13.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[87980]: osd.4 marked itself down and dead 2026-03-10T07:05:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-10T07:05:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[86149]: Upgrade: osd.4 is safe to restart 2026-03-10T07:05:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[86149]: Upgrade: Updating osd.4 2026-03-10T07:05:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[86149]: Deploying daemon osd.4 on vm05 2026-03-10T07:05:13.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:13 vm02 ceph-mon[86149]: osd.4 marked itself down and dead 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.774519709 +0000 UTC m=+0.058400418 container init 5a18de31b327d0d726122b96f31bd2e17f59d6b7097d5067228beba8297fa21d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default) 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.778339446 +0000 UTC m=+0.062220155 container start 5a18de31b327d0d726122b96f31bd2e17f59d6b7097d5067228beba8297fa21d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.780763553 +0000 UTC m=+0.064644262 container attach 5a18de31b327d0d726122b96f31bd2e17f59d6b7097d5067228beba8297fa21d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid) 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.725923078 +0000 UTC m=+0.009803798 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.909682523 +0000 UTC m=+0.193563221 container died 5a18de31b327d0d726122b96f31bd2e17f59d6b7097d5067228beba8297fa21d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 podman[78643]: 2026-03-10 07:05:13.928801401 +0000 UTC m=+0.212682110 container remove 5a18de31b327d0d726122b96f31bd2e17f59d6b7097d5067228beba8297fa21d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid) 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.4.service: Deactivated successfully. 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 systemd[1]: Stopped Ceph osd.4 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:05:14.008 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:13 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.4.service: Consumed 3.806s CPU time. 
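The sequence above is cephadm's rolling restart of osd.4: the mgr first asks the monitors whether the daemon is safe to stop ("osd ok-to-stop"), then stops the old v17.2.0 container through its systemd unit and removes it before redeploying on the target squid image. A minimal operator-level sketch of the same check-then-redeploy step follows; it approximates what the orchestrator does, it is not the cephadm code path, and the image reference is simply copied from the log above.

  # Ask the monitors whether osd.4 can be stopped without taking PGs offline.
  ceph osd ok-to-stop 4

  # If that succeeds, redeploy the daemon on the target image
  # (the same ceph-ci image this run pulls from quay.ceph.io).
  ceph orch daemon redeploy osd.4 quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df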
2026-03-10T07:05:14.366 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 systemd[1]: Starting Ceph osd.4 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
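The CephOSDFlapping warning from prometheus.a is not about osd.4 misbehaving: the rule's "group_left (hostname)" join fails because ceph_osd_metadata for osd.4 is present under two different instance labels (instance="ceph_cluster" and instance="192.168.123.105:9283"), so the one-to-one match the alert expects becomes many-to-many. A quick way to confirm the duplicate directly against the Prometheus HTTP API is sketched below; vm05:9095 as the prometheus.a endpoint is an assumption (the cephadm default port), not something shown in this log.

  # Count ceph_osd_metadata series per OSD; anything > 1 reproduces the
  # "found duplicate series" error from the rule evaluation.
  curl -sG http://vm05:9095/api/v1/query \
    --data-urlencode 'query=count by (ceph_daemon) (ceph_osd_metadata{ceph_daemon="osd.4"})' \
    | jq '.data.result[] | {osd: .metric.ceph_daemon, series: .value[1]}'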
2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 podman[78745]: 2026-03-10 07:05:14.226280831 +0000 UTC m=+0.016526828 container create f05da789367c76e26a2e1b761916d3ab26588f8da989b70f3734bd965cf13f5c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate, org.label-schema.build-date=20260223, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 podman[78745]: 2026-03-10 07:05:14.267474989 +0000 UTC m=+0.057720986 container init f05da789367c76e26a2e1b761916d3ab26588f8da989b70f3734bd965cf13f5c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 podman[78745]: 2026-03-10 07:05:14.270490121 +0000 UTC m=+0.060736118 container start f05da789367c76e26a2e1b761916d3ab26588f8da989b70f3734bd965cf13f5c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 podman[78745]: 2026-03-10 07:05:14.27132314 +0000 UTC m=+0.061569137 container attach f05da789367c76e26a2e1b761916d3ab26588f8da989b70f3734bd965cf13f5c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.vendor=CentOS) 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 podman[78745]: 2026-03-10 07:05:14.219393934 +0000 UTC m=+0.009639931 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 bash[78745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:14.366 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 bash[78745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:14.755 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:14 vm05 ceph-mon[76604]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T07:05:14.755 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:14 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:14.755 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:14 vm05 ceph-mon[76604]: osdmap e107: 8 total, 7 up, 8 in 2026-03-10T07:05:14.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:14 vm02 ceph-mon[87980]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T07:05:14.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:14 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:14.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:14 vm02 ceph-mon[87980]: osdmap e107: 8 total, 7 up, 8 in 2026-03-10T07:05:14.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:14 vm02 ceph-mon[86149]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-10T07:05:14.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:14 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:14.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:14 vm02 ceph-mon[86149]: osdmap e107: 8 total, 7 up, 8 in 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 bash[78745]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 bash[78745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:15.254 
INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:14 vm05 bash[78745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-1eee7adf-60b7-402a-91c7-aa8099401c8d/osd-block-fa4107e7-dcd8-4c34-a994-7706880ac944 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-10T07:05:15.254 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-1eee7adf-60b7-402a-91c7-aa8099401c8d/osd-block-fa4107e7-dcd8-4c34-a994-7706880ac944 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/ln -snf /dev/ceph-1eee7adf-60b7-402a-91c7-aa8099401c8d/osd-block-fa4107e7-dcd8-4c34-a994-7706880ac944 /var/lib/ceph/osd/ceph-4/block 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: Running command: /usr/bin/ln -snf /dev/ceph-1eee7adf-60b7-402a-91c7-aa8099401c8d/osd-block-fa4107e7-dcd8-4c34-a994-7706880ac944 /var/lib/ceph/osd/ceph-4/block 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate[78758]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-10T07:05:15.754 
INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[78745]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 podman[78965]: 2026-03-10 07:05:15.359951008 +0000 UTC m=+0.010439046 container died f05da789367c76e26a2e1b761916d3ab26588f8da989b70f3734bd965cf13f5c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 podman[78965]: 2026-03-10 07:05:15.375653272 +0000 UTC m=+0.026141300 container remove f05da789367c76e26a2e1b761916d3ab26588f8da989b70f3734bd965cf13f5c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.build-date=20260223) 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 podman[79004]: 2026-03-10 07:05:15.4680258 +0000 UTC m=+0.015140754 container create 05476fb1f9d8f112d690b6cd2e8b58cc057cd83c3131a134d5a9cf06b2c139d6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223) 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 podman[79004]: 2026-03-10 07:05:15.501891494 +0000 UTC m=+0.049006459 container init 05476fb1f9d8f112d690b6cd2e8b58cc057cd83c3131a134d5a9cf06b2c139d6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, ceph=True, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 podman[79004]: 2026-03-10 07:05:15.505028996 +0000 UTC m=+0.052143940 container start 05476fb1f9d8f112d690b6cd2e8b58cc057cd83c3131a134d5a9cf06b2c139d6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0) 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 bash[79004]: 05476fb1f9d8f112d690b6cd2e8b58cc057cd83c3131a134d5a9cf06b2c139d6 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 podman[79004]: 2026-03-10 07:05:15.46220617 +0000 UTC m=+0.009321135 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:15.754 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:15 vm05 systemd[1]: Started Ceph osd.4 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
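Before the new osd.4 container starts, a short-lived osd-4-activate container re-primes the OSD data directory: the raw activation attempt fails first (this OSD is LVM-backed, so "Failed to activate via raw" is expected and harmless), then ceph-bluestore-tool prime-osd-dir, the block symlink and the chowns succeed, and ceph-volume reports "lvm activate successful for osd ID: 4". The same activation can be driven by hand; the sketch below assumes the fsid embedded in the LV name (osd-block-fa4107e7-...) is the OSD's fsid and reuses the systemd unit name printed in the log.

  # Rebuild /var/lib/ceph/osd/ceph-4 from the bluestore LV without touching systemd
  # (this is what the -activate container runs via ceph-volume).
  ceph-volume lvm activate 4 fa4107e7-dcd8-4c34-a994-7706880ac944 --no-systemd

  # Then start the cephadm-managed unit for the daemon.
  systemctl start ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.4.service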
2026-03-10T07:05:15.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:15 vm05 ceph-mon[76604]: osdmap e108: 8 total, 7 up, 8 in 2026-03-10T07:05:15.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:15 vm02 ceph-mon[87980]: osdmap e108: 8 total, 7 up, 8 in 2026-03-10T07:05:15.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:15 vm02 ceph-mon[86149]: osdmap e108: 8 total, 7 up, 8 in 2026-03-10T07:05:16.148 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:05:16.071+0000 7f9b33371740 -1 Falling back to public interface 2026-03-10T07:05:16.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:16 vm05 ceph-mon[76604]: pgmap v80: 161 pgs: 23 stale+active+clean, 138 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T07:05:16.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:16 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:16.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:16 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:16.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:16 vm02 ceph-mon[87980]: pgmap v80: 161 pgs: 23 stale+active+clean, 138 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T07:05:16.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:16 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:16.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:16 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:16.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:16 vm02 ceph-mon[86149]: pgmap v80: 161 pgs: 23 stale+active+clean, 138 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-10T07:05:16.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:16 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:16.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:16 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.253 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:05:16.941+0000 7f9b33371740 -1 osd.4 0 read_superblock omap replica is missing. 
2026-03-10T07:05:17.253 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:05:16.987+0000 7f9b33371740 -1 osd.4 106 log_to_monitors true 2026-03-10T07:05:17.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.550 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:17.805 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.003 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:05:17 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:05:17.728+0000 7f9b2b11c640 -1 osd.4 106 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:05:18.550 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:05:18.405+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (12 PGs are or would become offline) 2026-03-10T07:05:18.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: pgmap v81: 161 pgs: 36 active+undersized, 3 stale+active+clean, 26 active+undersized+degraded, 96 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 102/630 objects degraded (16.190%) 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 102/630 objects degraded (16.190%), 26 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:18.835 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: osdmap e109: 8 total, 7 up, 8 in 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: pgmap v81: 161 pgs: 36 active+undersized, 3 stale+active+clean, 26 active+undersized+degraded, 96 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 102/630 objects degraded (16.190%) 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 102/630 objects degraded (16.190%), 26 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush 
set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: osdmap e109: 8 total, 7 up, 8 in 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:18.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:18 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: pgmap v81: 161 pgs: 36 active+undersized, 3 stale+active+clean, 26 active+undersized+degraded, 96 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 102/630 objects degraded (16.190%) 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 102/630 objects degraded (16.190%), 26 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 
07:05:18 vm05 ceph-mon[76604]: osdmap e109: 8 total, 7 up, 8 in 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:19.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:18 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (12 PGs are or would become offline) 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[87980]: osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759] boot 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[87980]: osdmap e110: 8 total, 8 up, 8 in 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (12 PGs are or would become offline) 2026-03-10T07:05:19.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:05:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[86149]: osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759] boot 2026-03-10T07:05:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[86149]: osdmap e110: 8 total, 8 up, 8 in 2026-03-10T07:05:19.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:19 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:05:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:19 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:19 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (12 PGs are or would become offline) 2026-03-10T07:05:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:19 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:05:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:19 vm05 ceph-mon[76604]: osd.4 [v2:192.168.123.105:6800/1501498759,v1:192.168.123.105:6801/1501498759] boot 2026-03-10T07:05:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:19 vm05 ceph-mon[76604]: osdmap e110: 8 total, 8 up, 8 in 2026-03-10T07:05:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:19 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T07:05:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:20 vm02 ceph-mon[87980]: pgmap v84: 161 pgs: 39 active+undersized, 29 active+undersized+degraded, 93 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 109/630 objects degraded (17.302%) 2026-03-10T07:05:20.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:20 vm02 ceph-mon[87980]: osdmap e111: 8 total, 8 up, 8 in 2026-03-10T07:05:20.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:20 vm02 ceph-mon[86149]: pgmap v84: 161 pgs: 39 active+undersized, 29 active+undersized+degraded, 93 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 109/630 objects degraded (17.302%) 2026-03-10T07:05:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:20 vm02 ceph-mon[86149]: osdmap e111: 8 total, 8 up, 8 in 2026-03-10T07:05:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:20 vm05 ceph-mon[76604]: pgmap v84: 161 pgs: 39 active+undersized, 29 active+undersized+degraded, 93 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 109/630 objects degraded (17.302%) 2026-03-10T07:05:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:20 vm05 ceph-mon[76604]: osdmap e111: 8 total, 8 up, 8 in 2026-03-10T07:05:22.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:05:21] "GET /metrics HTTP/1.1" 200 38064 "" "Prometheus/2.51.0" 2026-03-10T07:05:23.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:22 vm05 ceph-mon[76604]: pgmap v86: 161 pgs: 23 active+undersized, 15 active+undersized+degraded, 123 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 42/630 objects degraded (6.667%) 2026-03-10T07:05:23.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:22 vm02 ceph-mon[87980]: pgmap v86: 161 pgs: 23 active+undersized, 15 active+undersized+degraded, 123 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 42/630 objects degraded (6.667%) 2026-03-10T07:05:23.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:22 vm02 ceph-mon[86149]: pgmap v86: 161 pgs: 23 active+undersized, 15 active+undersized+degraded, 123 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 42/630 objects degraded (6.667%) 2026-03-10T07:05:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:23 vm05 ceph-mon[76604]: pgmap v87: 161 pgs: 2 active+undersized, 1 active+undersized+degraded, 158 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s; 1/630 objects degraded (0.159%) 
2026-03-10T07:05:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:23 vm05 ceph-mon[76604]: Health check update: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T07:05:24.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:23 vm02 ceph-mon[87980]: pgmap v87: 161 pgs: 2 active+undersized, 1 active+undersized+degraded, 158 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s; 1/630 objects degraded (0.159%) 2026-03-10T07:05:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:23 vm02 ceph-mon[87980]: Health check update: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T07:05:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:23 vm02 ceph-mon[86149]: pgmap v87: 161 pgs: 2 active+undersized, 1 active+undersized+degraded, 158 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s; 1/630 objects degraded (0.159%) 2026-03-10T07:05:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:23 vm02 ceph-mon[86149]: Health check update: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded (PG_DEGRADED) 2026-03-10T07:05:24.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:26 vm05 ceph-mon[76604]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 139 B/s rd, 0 op/s 2026-03-10T07:05:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:26 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded) 2026-03-10T07:05:26.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:26 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:05:26.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[87980]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 139 B/s rd, 0 op/s 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded) 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[86149]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 139 B/s rd, 0 op/s 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 1/630 objects degraded (0.159%), 1 pg degraded) 2026-03-10T07:05:26.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:26 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:05:27.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:27.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:27 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:27.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:27 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:27.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:27 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:28.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:28 vm02 ceph-mon[87980]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:28.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:28 vm02 ceph-mon[86149]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:28.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:28 vm05 ceph-mon[76604]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:30.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:30 vm05 ceph-mon[76604]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:05:30.753 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:30 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:30.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:30 vm02 ceph-mon[87980]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:05:30.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:30 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:30.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:30 vm02 ceph-mon[86149]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:05:30.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:30 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:32.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:31 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:05:31] "GET /metrics HTTP/1.1" 200 38070 "" "Prometheus/2.51.0" 2026-03-10T07:05:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:32 vm05 ceph-mon[76604]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 903 B/s rd, 0 op/s 2026-03-10T07:05:32.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:32 vm02 ceph-mon[87980]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 903 B/s rd, 0 op/s 2026-03-10T07:05:32.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:32 vm02 ceph-mon[86149]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 903 B/s rd, 0 op/s 2026-03-10T07:05:34.413 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.5\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:34.413 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:34 vm05 systemd[1]: Stopping Ceph osd.5 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: Upgrade: osd.5 is safe to restart 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T07:05:34.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:34.753 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[54725]: 2026-03-10T07:05:34.467+0000 7fec28c74700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:05:34.753 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[54725]: 2026-03-10T07:05:34.467+0000 7fec28c74700 -1 osd.5 111 *** Got signal Terminated *** 2026-03-10T07:05:34.753 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[54725]: 2026-03-10T07:05:34.467+0000 7fec28c74700 -1 osd.5 111 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: Upgrade: osd.5 is safe to restart 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:34.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-10T07:05:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: Upgrade: osd.5 is safe to restart 2026-03-10T07:05:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-10T07:05:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:35.665 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:35 vm05 ceph-mon[76604]: Upgrade: Updating osd.5 2026-03-10T07:05:35.665 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:35 vm05 ceph-mon[76604]: Deploying daemon osd.5 on vm05 2026-03-10T07:05:35.665 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:35 vm05 ceph-mon[76604]: osd.5 marked itself down and dead 2026-03-10T07:05:35.668 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80494]: 2026-03-10 07:05:35.4802553 +0000 UTC m=+1.027988417 container died ef1a70593f89c3be7a2e9116364b48f8d91a24f1abd3ae3e70ee310dbd7430c3 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, ceph=True, CEPH_POINT_RELEASE=-17.2.0, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , vcs-type=git, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, distribution-scope=public, io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8, version=8, GIT_CLEAN=True, GIT_BRANCH=HEAD) 2026-03-10T07:05:35.668 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80494]: 2026-03-10 07:05:35.498924733 +0000 UTC m=+1.046657850 container remove ef1a70593f89c3be7a2e9116364b48f8d91a24f1abd3ae3e70ee310dbd7430c3 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, io.openshift.expose-services=, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, GIT_BRANCH=HEAD, RELEASE=HEAD, maintainer=Guillaume Abrioux , architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, release=754, GIT_CLEAN=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-10T07:05:35.668 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 bash[80494]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5 2026-03-10T07:05:35.668 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.640791443 +0000 UTC m=+0.017488554 container create a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:05:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:35 vm02 ceph-mon[87980]: Upgrade: Updating osd.5 2026-03-10T07:05:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:35 vm02 ceph-mon[87980]: Deploying daemon osd.5 on vm05 2026-03-10T07:05:35.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:35 vm02 ceph-mon[87980]: osd.5 marked itself down and dead 2026-03-10T07:05:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:35 vm02 ceph-mon[86149]: Upgrade: Updating osd.5 2026-03-10T07:05:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:35 vm02 ceph-mon[86149]: Deploying daemon osd.5 on vm05 2026-03-10T07:05:35.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:35 vm02 ceph-mon[86149]: osd.5 marked itself down and dead 2026-03-10T07:05:35.932 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.675819536 +0000 UTC m=+0.052516647 container init a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, 
OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.680349672 +0000 UTC m=+0.057046773 container start a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.681291675 +0000 UTC m=+0.057988786 container attach a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.633805672 +0000 UTC m=+0.010502794 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 conmon[80572]: conmon a6f59929511e9ad40867 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee.scope/container/memory.events 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.822606694 +0000 UTC m=+0.199303805 container died a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, 
org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 podman[80561]: 2026-03-10 07:05:35.839465248 +0000 UTC m=+0.216162359 container remove a6f59929511e9ad40867a2e3cb7eda3a5ced8cc13593016f2b9e955167bfc0ee (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, org.label-schema.schema-version=1.0, CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.5.service: Deactivated successfully. 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 systemd[1]: Stopped Ceph osd.5 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:05:35.933 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:35 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.5.service: Consumed 6.814s CPU time. 2026-03-10T07:05:36.419 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:36 vm05 ceph-mon[76604]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:05:36.419 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:36 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:36.419 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:36 vm05 ceph-mon[76604]: osdmap e112: 8 total, 7 up, 8 in 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 systemd[1]: Starting Ceph osd.5 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
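[Editor's note] At this point the old quincy osd.5 container has been torn down (signal Terminated, the "-deactivate" helper container ran, systemd reports the unit deactivated) and the unit is being started again from the new squid image. If one wanted to watch the same transition directly on the host, the unit and container names visible in the journal lines above could be queried with standard systemd/podman commands; this is only an illustrative sketch, not something the test itself runs.

    # hypothetical spot-check on vm05 while cephadm redeploys osd.5
    fsid=28bd35e6-1c4e-11f1-9057-21b3549603fc
    systemctl status "ceph-${fsid}@osd.5.service"          # unit name as logged by systemd above
    sudo podman ps -a --filter "name=ceph-${fsid}-osd-5"   # container name as logged by podman above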
2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 podman[80664]: 2026-03-10 07:05:36.12379306 +0000 UTC m=+0.015918597 container create 6d30c31a04fd7c68ed445a9ebfea4557cb2ab6e3329044b124a1c813957e5ed0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223) 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 podman[80664]: 2026-03-10 07:05:36.164464141 +0000 UTC m=+0.056589687 container init 6d30c31a04fd7c68ed445a9ebfea4557cb2ab6e3329044b124a1c813957e5ed0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 podman[80664]: 2026-03-10 07:05:36.168021428 +0000 UTC m=+0.060146975 container start 6d30c31a04fd7c68ed445a9ebfea4557cb2ab6e3329044b124a1c813957e5ed0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 podman[80664]: 2026-03-10 07:05:36.173472918 +0000 UTC m=+0.065598464 container attach 6d30c31a04fd7c68ed445a9ebfea4557cb2ab6e3329044b124a1c813957e5ed0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 podman[80664]: 2026-03-10 07:05:36.117599732 +0000 UTC m=+0.009725289 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:36.419 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:36.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:36 vm02 ceph-mon[87980]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:05:36.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:36 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:36.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:36 vm02 ceph-mon[87980]: osdmap e112: 8 total, 7 up, 8 in 2026-03-10T07:05:36.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:36 vm02 ceph-mon[86149]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:05:36.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:36 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:36.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:36 vm02 ceph-mon[86149]: osdmap e112: 8 total, 7 up, 8 in 2026-03-10T07:05:37.003 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", 
instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-99755c0b-97dc-46ea-8159-04ab32ec496a/osd-block-4cf77df5-c5c7-4d20-b47b-ed4598f3fb56 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-10T07:05:37.004 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:36 vm05 bash[80664]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-99755c0b-97dc-46ea-8159-04ab32ec496a/osd-block-4cf77df5-c5c7-4d20-b47b-ed4598f3fb56 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-10T07:05:37.433 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:37 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:37.433 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:37 vm05 ceph-mon[76604]: osdmap e113: 8 total, 7 up, 8 in 2026-03-10T07:05:37.433 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:37.433 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:37 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: 
/usr/bin/ln -snf /dev/ceph-99755c0b-97dc-46ea-8159-04ab32ec496a/osd-block-4cf77df5-c5c7-4d20-b47b-ed4598f3fb56 /var/lib/ceph/osd/ceph-5/block 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 bash[80664]: Running command: /usr/bin/ln -snf /dev/ceph-99755c0b-97dc-46ea-8159-04ab32ec496a/osd-block-4cf77df5-c5c7-4d20-b47b-ed4598f3fb56 /var/lib/ceph/osd/ceph-5/block 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 bash[80664]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 bash[80664]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 bash[80664]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate[80675]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 bash[80664]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 podman[80664]: 2026-03-10 07:05:37.097020011 +0000 UTC m=+0.989145557 container died 6d30c31a04fd7c68ed445a9ebfea4557cb2ab6e3329044b124a1c813957e5ed0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3) 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 podman[80664]: 2026-03-10 07:05:37.145375591 +0000 UTC m=+1.037501137 container remove 6d30c31a04fd7c68ed445a9ebfea4557cb2ab6e3329044b124a1c813957e5ed0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 podman[80910]: 2026-03-10 07:05:37.241314485 +0000 UTC m=+0.016001361 container create 37ab69a8d103d624f93ea4d257d70726835cafb62026e492b9a03356ed073590 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, ceph=True) 2026-03-10T07:05:37.433 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 podman[80910]: 2026-03-10 07:05:37.281690373 +0000 UTC m=+0.056377260 container init 37ab69a8d103d624f93ea4d257d70726835cafb62026e492b9a03356ed073590 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, ceph=True, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:05:37.434 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 podman[80910]: 2026-03-10 07:05:37.284940305 +0000 UTC m=+0.059627181 container start 37ab69a8d103d624f93ea4d257d70726835cafb62026e492b9a03356ed073590 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:05:37.434 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 bash[80910]: 37ab69a8d103d624f93ea4d257d70726835cafb62026e492b9a03356ed073590 2026-03-10T07:05:37.434 
INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 podman[80910]: 2026-03-10 07:05:37.234126305 +0000 UTC m=+0.008813181 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:37.434 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:37 vm05 systemd[1]: Started Ceph osd.5 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:05:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[87980]: osdmap e113: 8 total, 7 up, 8 in 2026-03-10T07:05:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:37.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[86149]: osdmap e113: 8 total, 7 up, 8 in 2026-03-10T07:05:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:37 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:38.159 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:05:38.434 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:38 vm02 ceph-mon[86149]: pgmap v96: 161 pgs: 16 active+undersized, 15 peering, 6 stale+active+clean, 7 active+undersized+degraded, 117 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 30/630 objects degraded (4.762%) 2026-03-10T07:05:38.435 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:38 vm02 ceph-mon[86149]: Health check failed: Reduced data availability: 3 pgs inactive, 4 pgs peering (PG_AVAILABILITY) 2026-03-10T07:05:38.435 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:38 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 30/630 objects degraded (4.762%), 7 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:38.435 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:38 vm02 ceph-mon[87980]: pgmap v96: 161 pgs: 16 active+undersized, 15 peering, 6 stale+active+clean, 7 active+undersized+degraded, 117 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 30/630 objects degraded (4.762%) 2026-03-10T07:05:38.435 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:38 vm02 ceph-mon[87980]: Health check failed: Reduced data availability: 3 pgs inactive, 4 pgs peering (PG_AVAILABILITY) 2026-03-10T07:05:38.435 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:38 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 30/630 objects degraded (4.762%), 7 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:38.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:38 vm05 ceph-mon[76604]: pgmap v96: 161 pgs: 16 active+undersized, 15 peering, 6 stale+active+clean, 7 active+undersized+degraded, 117 active+clean; 457 KiB data, 202 MiB used, 
160 GiB / 160 GiB avail; 30/630 objects degraded (4.762%) 2026-03-10T07:05:38.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:38 vm05 ceph-mon[76604]: Health check failed: Reduced data availability: 3 pgs inactive, 4 pgs peering (PG_AVAILABILITY) 2026-03-10T07:05:38.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:38 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 30/630 objects degraded (4.762%), 7 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:38.589 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (2m) 43s ago 7m 22.5M - 0.25.0 c8568f914cd2 520cbcc5ad98 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (2m) 0s ago 6m 44.9M - dad864ee21e9 34567dcb4b51 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (2m) 43s ago 6m 52.0M - 3.5 e1d6a67b021e a06caff18850 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (2m) 0s ago 8m 486M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (2m) 43s ago 9m 554M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (2m) 43s ago 9m 48.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (2m) 0s ago 8m 37.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (2m) 43s ago 8m 42.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (2m) 43s ago 7m 9424k - 1.7.0 72c9c2088986 0bc0b34c732a 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (2m) 0s ago 7m 9.85M - 1.7.0 72c9c2088986 0129ed456f9d 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (110s) 43s ago 8m 69.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (89s) 43s ago 8m 47.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (67s) 43s ago 8m 44.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (45s) 43s ago 8m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 396c050a0b4f 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (23s) 0s ago 7m 49.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 05476fb1f9d8 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (1s) 0s ago 7m 15.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 37ab69a8d103 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (7m) 0s ago 7m 57.2M 4096M 17.2.0 e1d6a67b021e 9f83e32d5eb6 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (7m) 0s ago 7m 57.2M 4096M 17.2.0 e1d6a67b021e 83d454094982 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 
*:9095 running (2m) 0s ago 7m 44.2M - 2.51.0 1d3b7f56885b 2f14ff887bc7 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (6m) 43s ago 6m 94.2M - 17.2.0 e1d6a67b021e ef903c439808 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (6m) 0s ago 6m 94.3M - 17.2.0 e1d6a67b021e acd35f4810d9 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (6m) 43s ago 6m 92.1M - 17.2.0 e1d6a67b021e 6c68381e5378 2026-03-10T07:05:38.590 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (6m) 0s ago 6m 94.2M - 17.2.0 e1d6a67b021e 62a81876b05e 2026-03-10T07:05:38.832 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6, 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 10 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:05:38.833 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:05:39.003 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:38 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:05:38.625+0000 7fed32e0c740 -1 Falling back to public interface 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [ 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "mgr", 2026-03-10T07:05:39.067 
INFO:teuthology.orchestra.run.vm02.stdout: "mon" 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: ], 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "11/23 daemons upgraded", 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading osd daemons", 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false 2026-03-10T07:05:39.067 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 osds down; Reduced data availability: 3 pgs inactive, 4 pgs peering; Degraded data redundancy: 30/630 objects degraded (4.762%), 7 pgs degraded 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] OSD_DOWN: 1 osds down 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: osd.5 (root=default,host=vm05) is down 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] PG_AVAILABILITY: Reduced data availability: 3 pgs inactive, 4 pgs peering 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 2.16 is stuck peering for 63s, current state peering, last acting [6,2] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.16 is stuck peering for 6m, current state peering, last acting [7,1] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 4.8 is stuck peering for 6m, current state peering, last acting [7,6] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 6.a is stuck peering for 106s, current state peering, last acting [6,0] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 30/630 objects degraded (4.762%), 7 pgs degraded 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 2.12 is active+undersized+degraded, acting [3,7] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 2.1e is active+undersized+degraded, acting [3,0] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.2 is active+undersized+degraded, acting [3,6] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.4 is active+undersized+degraded, acting [1,2] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.5 is active+undersized+degraded, acting [3,2] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 3.c is active+undersized+degraded, acting [3,6] 2026-03-10T07:05:39.323 INFO:teuthology.orchestra.run.vm02.stdout: pg 6.c is active+undersized+degraded, acting [3,6] 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='client.44195 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='client.34284 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='client.34290 -' entity='client.admin' cmd=[{"prefix": "orch ps", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/2819256557' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3544545513' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='client.44195 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='client.34284 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='client.34290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/2819256557' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:39 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/3544545513' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='client.44195 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='client.34284 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='client.34290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/2819256557' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:39.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:39 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/3544545513' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:05:39.754 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:05:39.487+0000 7fed32e0c740 -1 osd.5 0 read_superblock omap replica is missing. 
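The journalctl@ceph.mon.* and journalctl@ceph.osd.5 streams above are the per-daemon systemd journals of the cephadm-managed containers, captured by the test harness. A minimal sketch of pulling the same audit and startup lines directly on the host follows; it assumes cephadm's usual ceph-<fsid>@<daemon>.service unit naming, reuses the fsid and daemon names visible in this run, and the time window is illustrative only.

  # On vm05: read osd.5's journal directly (unit name assumes cephadm's
  # ceph-<fsid>@<daemon>.service convention; fsid copied from the log above).
  sudo journalctl -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.5.service \
      --since "2026-03-10 07:05:00" --no-pager

  # Equivalent via the cephadm wrapper, which resolves the unit name itself.
  sudo cephadm logs --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc --name osd.5

  # Audit entries like the client.admin dispatches above appear in the mon journals.
  sudo journalctl -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.b.service \
      | grep "entity='client.admin'"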
2026-03-10T07:05:39.754 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:39 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:05:39.508+0000 7fed32e0c740 -1 osd.5 111 log_to_monitors true 2026-03-10T07:05:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: pgmap v97: 161 pgs: 28 active+undersized, 15 peering, 1 stale+active+clean, 10 active+undersized+degraded, 107 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 44/630 objects degraded (6.984%) 2026-03-10T07:05:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='client.34302 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:40 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:05:40.220+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: pgmap v97: 161 pgs: 28 active+undersized, 15 peering, 1 stale+active+clean, 10 active+undersized+degraded, 107 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 44/630 objects degraded (6.984%) 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='client.34302 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-10T07:05:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:40.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: pgmap v97: 161 pgs: 28 active+undersized, 15 peering, 1 stale+active+clean, 10 active+undersized+degraded, 107 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 44/630 objects degraded (6.984%) 2026-03-10T07:05:40.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='client.34302 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:05:40.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T07:05:40.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:05:40.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:41.753 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:05:41 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:05:41.407+0000 7fed2a3b6640 -1 osd.5 111 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:05:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:41 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:41 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T07:05:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:41 vm05 ceph-mon[76604]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T07:05:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:41 vm05 ceph-mon[76604]: from='osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:41 vm05 ceph-mon[76604]: osdmap e114: 8 total, 7 up, 8 in 2026-03-10T07:05:41.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:41 vm05 ceph-mon[76604]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:41.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[87980]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[87980]: from='osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[87980]: osdmap e114: 8 total, 7 up, 8 in 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[87980]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:05:41] "GET /metrics HTTP/1.1" 200 38082 "" "Prometheus/2.51.0" 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[86149]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[86149]: from='osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[86149]: osdmap e114: 8 total, 7 up, 8 in 2026-03-10T07:05:41.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:41 vm02 ceph-mon[86149]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:05:42.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: pgmap v99: 161 pgs: 39 active+undersized, 20 active+undersized+degraded, 102 active+clean; 457 KiB data, 240 MiB used, 160 GiB / 160 GiB avail; 71/630 objects degraded (11.270%) 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 3 pgs inactive, 4 pgs peering) 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793] boot 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: osdmap e115: 8 total, 8 up, 8 in 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[87980]: osdmap e116: 8 total, 8 up, 8 in 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: pgmap v99: 161 pgs: 39 active+undersized, 20 active+undersized+degraded, 102 active+clean; 457 KiB data, 240 MiB used, 160 GiB / 160 GiB avail; 71/630 objects degraded (11.270%) 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 3 pgs inactive, 4 pgs peering) 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793] boot 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: osdmap e115: 8 total, 8 up, 8 in 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: from='mgr.24905 
192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:05:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:42 vm02 ceph-mon[86149]: osdmap e116: 8 total, 8 up, 8 in 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: pgmap v99: 161 pgs: 39 active+undersized, 20 active+undersized+degraded, 102 active+clean; 457 KiB data, 240 MiB used, 160 GiB / 160 GiB avail; 71/630 objects degraded (11.270%) 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 3 pgs inactive, 4 pgs peering) 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: osd.5 [v2:192.168.123.105:6808/1571343793,v1:192.168.123.105:6809/1571343793] boot 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: osdmap e115: 8 total, 8 up, 8 in 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T07:05:43.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:42 vm05 ceph-mon[76604]: osdmap e116: 8 total, 8 up, 8 in 2026-03-10T07:05:43.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:43 vm05 ceph-mon[76604]: OSD bench result of 33077.890062 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.5. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:05:43.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:43 vm02 ceph-mon[87980]: OSD bench result of 33077.890062 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.5. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:05:43.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:43 vm02 ceph-mon[86149]: OSD bench result of 33077.890062 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.5. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
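The three mon entries above are the mClock scheduler rejecting an OSD bench result (about 33078 IOPS) that falls outside the configured 50-500 IOPS sanity window, so osd.5 keeps its unchanged 315 IOPS capacity. The message itself recommends measuring the device externally and overriding osd_mclock_max_capacity_iops_[hdd|ssd]; a minimal sketch of that follows, with the 315 value purely illustrative.

  # Re-run the built-in bench for a quick reading (or benchmark the device with fio).
  ceph tell osd.5 bench

  # Override the per-OSD capacity with a value you trust, then confirm it took effect.
  ceph config set osd.5 osd_mclock_max_capacity_iops_hdd 315
  ceph config show osd.5 osd_mclock_max_capacity_iops_hdd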
2026-03-10T07:05:44.480 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:44.146Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.5\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T07:05:44.480 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.5\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:44.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:44 vm05 ceph-mon[76604]: pgmap v102: 161 pgs: 13 peering, 26 active+undersized, 15 active+undersized+degraded, 107 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s; 53/630 objects degraded (8.413%) 2026-03-10T07:05:44.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:44 vm05 ceph-mon[76604]: Health check update: Degraded data redundancy: 53/630 objects degraded (8.413%), 15 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:44.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:44 vm02 ceph-mon[87980]: pgmap v102: 161 pgs: 13 peering, 26 active+undersized, 15 active+undersized+degraded, 107 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s; 53/630 objects degraded (8.413%) 2026-03-10T07:05:44.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:44 vm02 ceph-mon[87980]: Health check update: Degraded data redundancy: 53/630 objects degraded (8.413%), 15 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:44.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:44 vm02 ceph-mon[86149]: pgmap v102: 161 pgs: 13 peering, 26 active+undersized, 15 active+undersized+degraded, 107 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s; 53/630 objects degraded (8.413%) 2026-03-10T07:05:44.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:44 vm02 ceph-mon[86149]: Health check update: Degraded data redundancy: 53/630 objects degraded (8.413%), 15 pgs degraded (PG_DEGRADED) 2026-03-10T07:05:46.794 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:46 vm02 ceph-mon[87980]: pgmap v103: 161 pgs: 13 peering, 21 active+undersized, 15 active+undersized+degraded, 112 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 53/630 objects degraded (8.413%) 2026-03-10T07:05:46.794 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:46 vm02 ceph-mon[86149]: pgmap v103: 161 pgs: 13 peering, 21 active+undersized, 15 active+undersized+degraded, 112 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 53/630 objects degraded (8.413%) 2026-03-10T07:05:46.830 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:46 vm05 ceph-mon[76604]: pgmap v103: 161 pgs: 13 peering, 21 active+undersized, 15 active+undersized+degraded, 112 active+clean; 457 KiB data, 
244 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 53/630 objects degraded (8.413%) 2026-03-10T07:05:47.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:47.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:47 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:47.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:47 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:47 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:48.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:48 vm02 ceph-mon[87980]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:48 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 53/630 objects degraded (8.413%), 15 pgs degraded) 2026-03-10T07:05:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:48 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:05:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:48 vm02 ceph-mon[86149]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:48 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 53/630 objects degraded (8.413%), 15 pgs degraded) 2026-03-10T07:05:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:48 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:05:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:48 vm05 
ceph-mon[76604]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:48 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 53/630 objects degraded (8.413%), 15 pgs degraded) 2026-03-10T07:05:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:48 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:05:51.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:50 vm05 ceph-mon[76604]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:05:51.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:50 vm02 ceph-mon[87980]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:05:51.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:50 vm02 ceph-mon[86149]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:05:52.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:05:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:05:51] "GET /metrics HTTP/1.1" 200 38082 "" "Prometheus/2.51.0" 2026-03-10T07:05:53.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:52 vm05 ceph-mon[76604]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 855 B/s rd, 0 op/s 2026-03-10T07:05:53.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:52 vm02 ceph-mon[87980]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 855 B/s rd, 0 op/s 2026-03-10T07:05:53.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:52 vm02 ceph-mon[86149]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 855 B/s rd, 0 op/s 2026-03-10T07:05:54.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:54.146Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.5\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-10T07:05:54.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: 
ts=2026-03-10T07:05:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.5\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:54 vm05 ceph-mon[76604]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T07:05:55.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:54 vm02 ceph-mon[87980]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T07:05:55.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:54 vm02 ceph-mon[86149]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:56.253 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: Upgrade: osd.6 is safe to restart 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: Upgrade: Updating osd.6 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:56.253 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:56 vm05 ceph-mon[76604]: Deploying daemon osd.6 on vm05 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:56.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: Upgrade: osd.6 is safe to restart 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: Upgrade: Updating osd.6 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[87980]: Deploying daemon osd.6 on vm05 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: Upgrade: osd.6 is safe to restart 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: Upgrade: Updating osd.6 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:05:56.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:56 vm02 ceph-mon[86149]: Deploying daemon osd.6 on vm05 2026-03-10T07:05:56.504 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:56 vm05 systemd[1]: Stopping Ceph osd.6 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:05:56.504 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[57533]: 2026-03-10T07:05:56.322+0000 7f57ba4a0700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:05:56.504 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[57533]: 2026-03-10T07:05:56.322+0000 7f57ba4a0700 -1 osd.6 116 *** Got signal Terminated *** 2026-03-10T07:05:56.504 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[57533]: 2026-03-10T07:05:56.322+0000 7f57ba4a0700 -1 osd.6 116 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:05:57.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:57 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:57.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:57 vm05 ceph-mon[76604]: osd.6 marked itself down and dead 2026-03-10T07:05:57.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:05:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:05:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:05:57.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82415]: 2026-03-10 07:05:57.136080601 +0000 UTC m=+0.826642915 container died 9f83e32d5eb6933cecbed31e4a0e4d6375ae06cf1d58f2012c51b08434b339ca (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, CEPH_POINT_RELEASE=-17.2.0, release=754, GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, version=8, GIT_CLEAN=True, io.openshift.expose-services=, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, vendor=Red Hat, Inc., ceph=True, io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8) 2026-03-10T07:05:57.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82415]: 2026-03-10 07:05:57.168296802 +0000 UTC m=+0.858859115 container remove 9f83e32d5eb6933cecbed31e4a0e4d6375ae06cf1d58f2012c51b08434b339ca (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, release=754, version=8, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, GIT_BRANCH=HEAD, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, CEPH_POINT_RELEASE=-17.2.0, ceph=True, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.expose-services=, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, vcs-type=git, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 
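The podman entries above are the old quincy (17.2.0) osd.6 container being torn down on vm05 so the squid image can replace it. A small sketch of inspecting that transition on the host itself; the name filter and the jq field names are assumptions based on the container names shown in the log and on what recent cephadm versions emit.

  # On vm05: list the osd.6 container (cephadm names them ceph-<fsid>-osd-6).
  sudo podman ps -a --filter name=osd-6

  # cephadm's view of each local daemon, its version, and the image it runs from
  # (field names assumed from recent cephadm output).
  sudo cephadm ls | jq -r '.[] | [.name, .version, .container_image_name] | @tsv'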
2026-03-10T07:05:57.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 bash[82415]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6 2026-03-10T07:05:57.262 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:57 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:57.262 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:57 vm02 ceph-mon[87980]: osd.6 marked itself down and dead 2026-03-10T07:05:57.262 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:57 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:05:57.262 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:57 vm02 ceph-mon[86149]: osd.6 marked itself down and dead 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.349192411 +0000 UTC m=+0.021354098 container create 66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.400685962 +0000 UTC m=+0.072847668 container init 66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.404581942 +0000 UTC m=+0.076743639 container start 66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, 
org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223) 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.405482266 +0000 UTC m=+0.077643963 container attach 66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.337992354 +0000 UTC m=+0.010154051 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 conmon[82496]: conmon 66f4759c1851deeb4ba6 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42.scope/container/memory.events 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.530454035 +0000 UTC m=+0.202615732 container died 66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82485]: 2026-03-10 07:05:57.553280966 +0000 UTC m=+0.225442663 container remove 66f4759c1851deeb4ba6d5e565a7d07f5f125ddb741e173c543464768c0eaf42 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6.service: Deactivated successfully. 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6.service: Unit process 82496 (conmon) remains running after unit stopped. 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6.service: Unit process 82504 (podman) remains running after unit stopped. 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 systemd[1]: Stopped Ceph osd.6 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:05:57.647 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6.service: Consumed 20.350s CPU time, 136.4M memory peak. 2026-03-10T07:05:58.004 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 systemd[1]: Starting Ceph osd.6 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82587]: 2026-03-10 07:05:57.848045959 +0000 UTC m=+0.018982079 container create 1140f86ca306274df54537582cdb9533eebe62fe0d090d38313fb41486befe7c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82587]: 2026-03-10 07:05:57.888443989 +0000 UTC m=+0.059380119 container init 1140f86ca306274df54537582cdb9533eebe62fe0d090d38313fb41486befe7c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, 
ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82587]: 2026-03-10 07:05:57.892247265 +0000 UTC m=+0.063183395 container start 1140f86ca306274df54537582cdb9533eebe62fe0d090d38313fb41486befe7c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82587]: 2026-03-10 07:05:57.897552292 +0000 UTC m=+0.068488482 container attach 1140f86ca306274df54537582cdb9533eebe62fe0d090d38313fb41486befe7c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS) 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 podman[82587]: 2026-03-10 07:05:57.840298402 +0000 UTC m=+0.011234543 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 bash[82587]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.005 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:57 vm05 bash[82587]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:58 vm02 ceph-mon[87980]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:58.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:58 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:58.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:58 vm02 ceph-mon[87980]: osdmap e117: 8 
total, 7 up, 8 in 2026-03-10T07:05:58.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:58 vm02 ceph-mon[86149]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:58.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:58 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:58.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:58 vm02 ceph-mon[86149]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T07:05:58.419 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:58 vm05 ceph-mon[76604]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:05:58.419 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:58 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:05:58.419 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:58 vm05 ceph-mon[76604]: osdmap e117: 8 total, 7 up, 8 in 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-3fafd1a3-fab7-4fe9-a45e-4c6d63008743/osd-block-d5098a9b-8d57-4249-b546-8ac52d23059a --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-3fafd1a3-fab7-4fe9-a45e-4c6d63008743/osd-block-d5098a9b-8d57-4249-b546-8ac52d23059a --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-10T07:05:58.754 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/ln -snf /dev/ceph-3fafd1a3-fab7-4fe9-a45e-4c6d63008743/osd-block-d5098a9b-8d57-4249-b546-8ac52d23059a /var/lib/ceph/osd/ceph-6/block 2026-03-10T07:05:58.754 
INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/ln -snf /dev/ceph-3fafd1a3-fab7-4fe9-a45e-4c6d63008743/osd-block-d5098a9b-8d57-4249-b546-8ac52d23059a /var/lib/ceph/osd/ceph-6/block 2026-03-10T07:05:59.094 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:59 vm05 ceph-mon[76604]: osdmap e118: 8 total, 7 up, 8 in 2026-03-10T07:05:59.094 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:59 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:59.094 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:05:59 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate[82597]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82587]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 podman[82809]: 2026-03-10 07:05:58.798968306 +0000 UTC m=+0.009605935 container died 1140f86ca306274df54537582cdb9533eebe62fe0d090d38313fb41486befe7c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 podman[82809]: 2026-03-10 07:05:58.813370144 +0000 UTC m=+0.024007763 container remove 1140f86ca306274df54537582cdb9533eebe62fe0d090d38313fb41486befe7c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-activate, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 podman[82847]: 2026-03-10 07:05:58.903711904 +0000 UTC m=+0.016794586 container create affae9a808f3e5e6d40a6c29be51d9f6a3469778290d112eab458cd556d63f0c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 podman[82847]: 2026-03-10 07:05:58.94132875 +0000 UTC m=+0.054411432 container init affae9a808f3e5e6d40a6c29be51d9f6a3469778290d112eab458cd556d63f0c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 podman[82847]: 2026-03-10 07:05:58.944259525 +0000 UTC m=+0.057342207 container start affae9a808f3e5e6d40a6c29be51d9f6a3469778290d112eab458cd556d63f0c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2) 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 bash[82847]: affae9a808f3e5e6d40a6c29be51d9f6a3469778290d112eab458cd556d63f0c 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 podman[82847]: 2026-03-10 07:05:58.897709774 +0000 UTC m=+0.010792466 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:05:59.094 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:58 vm05 systemd[1]: Started Ceph osd.6 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:05:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:59 vm02 ceph-mon[87980]: osdmap e118: 8 total, 7 up, 8 in 2026-03-10T07:05:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:59 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:05:59 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:59 vm02 ceph-mon[86149]: osdmap e118: 8 total, 7 up, 8 in 2026-03-10T07:05:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:59 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:05:59 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:05:59.847 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:05:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:05:59.526+0000 7fedaa5aa740 -1 Falling back to public interface 2026-03-10T07:06:00.129 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:00 vm05 ceph-mon[76604]: pgmap v112: 161 pgs: 1 active+undersized, 15 peering, 6 stale+active+clean, 139 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:06:00.129 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:00 vm05 ceph-mon[76604]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T07:06:00.505 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:06:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:06:00.127+0000 7fedaa5aa740 -1 osd.6 0 read_superblock omap replica is missing. 
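The sequence above is the per-daemon restart cephadm performs while the upgrade walks through the OSDs: the quincy osd.6 container is terminated (fast shutdown), short-lived -osd-6-deactivate and -osd-6-activate helper containers run ceph-volume, raw activation fails for this LVM-backed OSD ("Failed to activate via raw: did not find any matching OSD to activate") before lvm activate succeeds, and the long-running osd.6 container is started from the squid image. If the activation ever needs to be inspected by hand on vm05, a sketch along these lines would list the LVM-backed OSDs ceph-volume can see (illustrative; assumes the cephadm shell container has access to the host's devices):

    sudo cephadm shell -- ceph-volume lvm list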
2026-03-10T07:06:00.505 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:06:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:06:00.167+0000 7fedaa5aa740 -1 osd.6 116 log_to_monitors true 2026-03-10T07:06:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:00 vm02 ceph-mon[87980]: pgmap v112: 161 pgs: 1 active+undersized, 15 peering, 6 stale+active+clean, 139 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:06:00.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:00 vm02 ceph-mon[87980]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T07:06:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:00 vm02 ceph-mon[86149]: pgmap v112: 161 pgs: 1 active+undersized, 15 peering, 6 stale+active+clean, 139 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:06:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:00 vm02 ceph-mon[86149]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-10T07:06:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:01 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:01 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:01 vm05 ceph-mon[76604]: from='osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T07:06:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:01 vm05 ceph-mon[76604]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T07:06:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:01 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:01 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.504 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:06:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:06:01.164+0000 7feda2355640 -1 osd.6 116 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:06:01.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[87980]: from='osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[87980]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
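With osd.6 back up on the new image it re-registers its device class and CRUSH position (the osd crush set-device-class dispatch above and the osd crush create-or-move a little further down), and the orchestrator then asks whether the next OSD may be stopped; the mgr reply just below, "unsafe to stop osd(s) at this time (17 PGs are or would become offline)", is that gate holding the upgrade until the PGs finish peering. The same checks can be run by hand if needed (illustrative commands, output not taken from this run):

    sudo cephadm shell -- ceph osd tree
    sudo cephadm shell -- ceph osd ok-to-stop 7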
2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[86149]: from='osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[86149]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:01 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:01 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:06:01] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-10T07:06:02.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:01 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:06:01.719+0000 7f03c12b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: pgmap v113: 161 pgs: 14 active+undersized, 15 peering, 1 stale+active+clean, 9 active+undersized+degraded, 122 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 30/630 objects degraded (4.762%) 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: osdmap e119: 8 total, 7 up, 8 in 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 30/630 objects degraded (4.762%), 9 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 
vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:02 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:02.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: pgmap v113: 161 pgs: 14 active+undersized, 15 peering, 1 stale+active+clean, 9 active+undersized+degraded, 122 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 30/630 objects degraded (4.762%) 2026-03-10T07:06:02.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: osdmap e119: 8 total, 7 up, 8 in 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 30/630 objects degraded (4.762%), 9 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.585 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: pgmap v113: 161 pgs: 14 active+undersized, 15 peering, 1 stale+active+clean, 9 active+undersized+degraded, 122 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 30/630 objects degraded (4.762%) 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: osdmap e119: 8 total, 7 up, 8 in 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 30/630 objects degraded (4.762%), 9 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' 
entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:02 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:03 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:03 vm05 ceph-mon[76604]: Upgrade: unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-10T07:06:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:03 vm05 ceph-mon[76604]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:06:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:03 vm05 ceph-mon[76604]: osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505] boot 2026-03-10T07:06:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:03 vm05 ceph-mon[76604]: osdmap e120: 8 total, 8 up, 8 in 2026-03-10T07:06:03.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:03 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:06:03.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[87980]: Upgrade: unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[87980]: osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505] boot 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[87980]: osdmap e120: 8 total, 8 up, 8 in 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[86149]: Upgrade: unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[86149]: osd.6 [v2:192.168.123.105:6816/2431883505,v1:192.168.123.105:6817/2431883505] boot 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[86149]: osdmap e120: 8 total, 8 up, 8 in 2026-03-10T07:06:03.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:03 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T07:06:04.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:04 vm05 ceph-mon[76604]: pgmap v116: 161 pgs: 22 active+undersized, 15 peering, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53/630 objects degraded (8.413%) 2026-03-10T07:06:04.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:04 vm05 ceph-mon[76604]: osdmap e121: 8 total, 8 up, 8 in 2026-03-10T07:06:04.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.6\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:04.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:04 vm02 ceph-mon[87980]: pgmap v116: 161 pgs: 22 active+undersized, 15 peering, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53/630 objects degraded (8.413%) 2026-03-10T07:06:04.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:04 vm02 ceph-mon[87980]: osdmap e121: 8 total, 8 up, 8 in 2026-03-10T07:06:04.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:04 vm02 ceph-mon[86149]: pgmap v116: 161 pgs: 22 active+undersized, 15 peering, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53/630 objects degraded (8.413%) 2026-03-10T07:06:04.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:04 vm02 ceph-mon[86149]: osdmap e121: 8 total, 8 up, 8 in 2026-03-10T07:06:06.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:06 vm05 ceph-mon[76604]: pgmap v118: 161 pgs: 22 active+undersized, 15 peering, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53/630 objects degraded (8.413%) 2026-03-10T07:06:06.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:06 vm02 ceph-mon[87980]: pgmap v118: 161 pgs: 22 active+undersized, 15 peering, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53/630 objects degraded (8.413%) 2026-03-10T07:06:06.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:06 vm02 ceph-mon[86149]: pgmap v118: 161 pgs: 22 active+undersized, 15 peering, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 243 MiB used, 160 GiB / 160 GiB avail; 53/630 objects degraded (8.413%) 2026-03-10T07:06:07.211 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ 
$labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:07.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:07 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:07.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:07 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:07.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:07 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:08.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:08 vm05 ceph-mon[76604]: pgmap v119: 161 pgs: 9 peering, 152 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T07:06:08.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:08 vm05 ceph-mon[76604]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T07:06:08.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:08 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 53/630 objects degraded (8.413%), 16 pgs degraded) 2026-03-10T07:06:08.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:08 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:06:08.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[87980]: pgmap v119: 161 pgs: 9 peering, 152 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T07:06:08.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[87980]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T07:06:08.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 53/630 objects degraded (8.413%), 16 pgs degraded) 2026-03-10T07:06:08.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:06:08.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[86149]: pgmap v119: 161 pgs: 9 peering, 152 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s 2026-03-10T07:06:08.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[86149]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-10T07:06:08.585 
INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 53/630 objects degraded (8.413%), 16 pgs degraded)
2026-03-10T07:06:08.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:08 vm02 ceph-mon[86149]: Cluster is now healthy
2026-03-10T07:06:09.550 INFO:teuthology.orchestra.run.vm02.stdout:true
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (3m) 75s ago 7m 22.5M - 0.25.0 c8568f914cd2 520cbcc5ad98
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (3m) 9s ago 7m 45.2M - dad864ee21e9 34567dcb4b51
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (2m) 75s ago 7m 52.0M - 3.5 e1d6a67b021e a06caff18850
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (2m) 9s ago 9m 486M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (3m) 75s ago 9m 554M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (2m) 75s ago 9m 48.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (2m) 9s ago 9m 37.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (2m) 75s ago 9m 42.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (3m) 75s ago 7m 9424k - 1.7.0 72c9c2088986 0bc0b34c732a
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (3m) 9s ago 7m 9.88M - 1.7.0 72c9c2088986 0129ed456f9d
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (2m) 75s ago 9m 69.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (2m) 75s ago 8m 47.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (99s) 75s ago 8m 44.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (76s) 75s ago 8m 12.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 396c050a0b4f
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (54s) 9s ago 8m 50.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 05476fb1f9d8
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (32s) 9s ago 8m 66.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 37ab69a8d103
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (11s) 9s ago 8m 12.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e affae9a808f3
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (8m) 9s ago 8m 57.7M 4096M 17.2.0 e1d6a67b021e 83d454094982
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (2m) 9s ago 7m 44.2M - 2.51.0 1d3b7f56885b 2f14ff887bc7
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (7m) 75s ago 7m 94.2M - 17.2.0 e1d6a67b021e ef903c439808
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (7m) 9s ago 7m 94.4M - 17.2.0 e1d6a67b021e acd35f4810d9
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (7m) 75s ago 7m 92.1M - 17.2.0 e1d6a67b021e 6c68381e5378
2026-03-10T07:06:09.951 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (7m) 9s ago 7m 94.4M - 17.2.0 e1d6a67b021e 62a81876b05e
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "mon": {
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": {
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "osd": {
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1,
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": {
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "overall": {
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5,
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 12
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T07:06:10.193 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:06:10.231 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[86149]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 129 B/s rd, 0 op/s
2026-03-10T07:06:10.405 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:06:10.405 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T07:06:10.405 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true,
2026-03-10T07:06:10.405 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: "mgr",
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: "mon"
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: ],
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "12/23 daemons upgraded",
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading osd daemons",
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false
2026-03-10T07:06:10.406 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:06:10.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:10 vm05 ceph-mon[76604]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 129 B/s rd, 0 op/s
2026-03-10T07:06:10.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:10 vm05 ceph-mon[76604]: from='client.44227 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:06:10.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T07:06:10.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:10 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/2351003962' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T07:06:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[87980]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 129 B/s rd, 0 op/s
2026-03-10T07:06:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[87980]: from='client.44227 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:06:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T07:06:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/2351003962' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-10T07:06:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[86149]: from='client.44227 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T07:06:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T07:06:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:10 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/2351003962' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:10.650 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T07:06:11.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:11 vm05 ceph-mon[76604]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:11 vm05 ceph-mon[76604]: from='client.34326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:11 vm05 ceph-mon[76604]: from='client.34338 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:11 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/321980064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:06:11.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[87980]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[87980]: from='client.34326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[87980]: from='client.34338 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/321980064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:06:11.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[86149]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[86149]: from='client.34326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[86149]: from='client.34338 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:11.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:11 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/321980064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:06:12.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:11 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:06:11] "GET /metrics HTTP/1.1" 200 38108 "" "Prometheus/2.51.0" 2026-03-10T07:06:12.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:12 vm05 ceph-mon[76604]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 458 B/s rd, 0 op/s 2026-03-10T07:06:12.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:12 vm02 ceph-mon[87980]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 458 B/s rd, 0 op/s 2026-03-10T07:06:12.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:12 vm02 ceph-mon[86149]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 458 B/s rd, 0 op/s 2026-03-10T07:06:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:14 vm05 ceph-mon[76604]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-10T07:06:14.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.7\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:14.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:14 vm02 ceph-mon[87980]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-10T07:06:14.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:14 vm02 ceph-mon[86149]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-10T07:06:16.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:16 vm05 ceph-mon[76604]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 773 B/s rd, 0 op/s 2026-03-10T07:06:16.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:16 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:16.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:16 vm02 ceph-mon[87980]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 773 B/s rd, 0 op/s 2026-03-10T07:06:16.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:16 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:16.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:16 vm02 ceph-mon[86149]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 773 B/s rd, 0 op/s 2026-03-10T07:06:16.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:16 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:16.995 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found 
duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:17.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:17 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:17.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:17.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:17.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T07:06:17.254 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:17 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T07:06:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[86149]: 
from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-10T07:06:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:17 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:18.182 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:17 vm05 systemd[1]: Stopping Ceph osd.7 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:06:18.183 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[60290]: 2026-03-10T07:06:17.999+0000 7f0742c49700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:06:18.183 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[60290]: 2026-03-10T07:06:17.999+0000 7f0742c49700 -1 osd.7 121 *** Got signal Terminated *** 2026-03-10T07:06:18.183 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[60290]: 2026-03-10T07:06:17.999+0000 7f0742c49700 -1 osd.7 121 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:06:18.455 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:18 vm05 ceph-mon[76604]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:18.455 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:18 vm05 ceph-mon[76604]: Upgrade: osd.7 is safe to restart 2026-03-10T07:06:18.455 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:18 vm05 ceph-mon[76604]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:06:18.455 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:18 vm05 ceph-mon[76604]: Upgrade: Updating osd.7 2026-03-10T07:06:18.455 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:18 vm05 ceph-mon[76604]: Deploying daemon osd.7 on vm05 2026-03-10T07:06:18.455 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:18 vm05 ceph-mon[76604]: osd.7 marked itself down and dead 2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84348]: 2026-03-10 07:06:18.197212658 +0000 UTC m=+0.211183282 container died 83d454094982b5f01919101356f90da26f3eae6c3c714d7f6f1a78803225799c (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, vendor=Red Hat, Inc., version=8, name=centos-stream, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, vcs-type=git, GIT_BRANCH=HEAD, architecture=x86_64, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, maintainer=Guillaume Abrioux , GIT_CLEAN=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, CEPH_POINT_RELEASE=-17.2.0, GIT_REPO=https://github.com/ceph/ceph-container.git, distribution-scope=public, io.openshift.tags=base centos centos-stream) 2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84348]: 2026-03-10 07:06:18.220981371 +0000 UTC m=+0.234951995 container remove 83d454094982b5f01919101356f90da26f3eae6c3c714d7f6f1a78803225799c (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, ceph=True, release=754, distribution-scope=public, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, version=8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, GIT_BRANCH=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_CLEAN=True, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, vendor=Red Hat, Inc., io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, GIT_REPO=https://github.com/ceph/ceph-container.git) 2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 bash[84348]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7 2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84416]: 2026-03-10 07:06:18.362565003 +0000 UTC m=+0.017126076 container create 23a6d79b757dd73897865712e9b5524176b672ff591c1802631e8d9c038e5091 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3) 2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84416]: 2026-03-10 07:06:18.407582557 +0000 UTC m=+0.062143639 container init 23a6d79b757dd73897865712e9b5524176b672ff591c1802631e8d9c038e5091 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS) 2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84416]: 2026-03-10 07:06:18.410615171 +0000 UTC m=+0.065176244 container start 23a6d79b757dd73897865712e9b5524176b672ff591c1802631e8d9c038e5091 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0) 
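The CephNodeDiskspaceWarning and CephOSDFlapping evaluation failures above are the same underlying problem: while the upgrade is in flight, the right-hand side of each alert's join exists twice, once with the new "cluster" label and once without it (compare the pairs of node_uname_info and ceph_osd_metadata series dumped in the error messages), so the `on (instance) group_left` / `on (ceph_daemon) group_left` match is no longer many-to-one. A small diagnostic sketch against the Prometheus instance deployed in this run (prometheus.a on vm05, port 9095 per the `ceph orch ps` listing above); the queries are illustrative, not something the harness runs:

    # Any instance with more than one node_uname_info series breaks the
    # "on (instance) group_left (nodename)" join in CephNodeDiskspaceWarning.
    curl -sG http://vm05:9095/api/v1/query \
         --data-urlencode 'query=count by (instance) (node_uname_info) > 1'
    # Same duplicate check for the ceph_osd_metadata side used by CephOSDFlapping.
    curl -sG http://vm05:9095/api/v1/query \
         --data-urlencode 'query=count by (ceph_daemon) (ceph_osd_metadata) > 1'

The duplicates should clear once every exporter is on the post-upgrade image and the pre-upgrade series drop out of the evaluation window.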
2026-03-10T07:06:18.455 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84416]: 2026-03-10 07:06:18.412638297 +0000 UTC m=+0.067199370 container attach 23a6d79b757dd73897865712e9b5524176b672ff591c1802631e8d9c038e5091 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:06:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[87980]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[87980]: Upgrade: osd.7 is safe to restart 2026-03-10T07:06:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[87980]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:06:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[87980]: Upgrade: Updating osd.7 2026-03-10T07:06:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[87980]: Deploying daemon osd.7 on vm05 2026-03-10T07:06:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[87980]: osd.7 marked itself down and dead 2026-03-10T07:06:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[86149]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-10T07:06:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[86149]: Upgrade: osd.7 is safe to restart 2026-03-10T07:06:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[86149]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:06:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[86149]: Upgrade: Updating osd.7 2026-03-10T07:06:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[86149]: Deploying daemon osd.7 on vm05 2026-03-10T07:06:18.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:18 vm02 ceph-mon[86149]: osd.7 marked itself down and dead 2026-03-10T07:06:18.755 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84416]: 2026-03-10 07:06:18.355982085 +0000 UTC m=+0.010543168 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:06:18.755 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84434]: 2026-03-10 07:06:18.555499248 +0000 UTC m=+0.010169790 container died 23a6d79b757dd73897865712e9b5524176b672ff591c1802631e8d9c038e5091 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, CEPH_REF=squid, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:06:18.755 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84434]: 2026-03-10 07:06:18.585897857 +0000 UTC m=+0.040568390 container remove 23a6d79b757dd73897865712e9b5524176b672ff591c1802631e8d9c038e5091 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-10T07:06:18.755 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service: Deactivated successfully. 2026-03-10T07:06:18.755 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 systemd[1]: Stopped Ceph osd.7 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
2026-03-10T07:06:18.755 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service: Consumed 4.154s CPU time. 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 systemd[1]: Starting Ceph osd.7 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84519]: 2026-03-10 07:06:18.873145293 +0000 UTC m=+0.020251043 container create 436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84519]: 2026-03-10 07:06:18.911096275 +0000 UTC m=+0.058202025 container init 436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84519]: 2026-03-10 07:06:18.914586405 +0000 UTC m=+0.061692155 container start 436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84519]: 2026-03-10 07:06:18.915439952 +0000 UTC m=+0.062545702 container attach 
436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default) 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 podman[84519]: 2026-03-10 07:06:18.866202953 +0000 UTC m=+0.013308703 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:18 vm05 bash[84519]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.221 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.474 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:19 vm05 ceph-mon[76604]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:06:19.474 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:19 vm05 ceph-mon[76604]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T07:06:19.474 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:19 vm05 ceph-mon[76604]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T07:06:19.474 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:19 vm05 ceph-mon[76604]: osdmap e123: 8 total, 7 up, 8 in 2026-03-10T07:06:19.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[87980]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:06:19.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[87980]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T07:06:19.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[87980]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T07:06:19.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[87980]: osdmap e123: 8 total, 7 up, 8 in 2026-03-10T07:06:19.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[86149]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-10T07:06:19.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[86149]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-10T07:06:19.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:19 vm02 
ceph-mon[86149]: osdmap e122: 8 total, 7 up, 8 in 2026-03-10T07:06:19.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:19 vm02 ceph-mon[86149]: osdmap e123: 8 total, 7 up, 8 in 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f0e5b41c-6509-4d93-87f1-e0f3327d4301/osd-block-abb08ecf-81bc-4ddf-8ab4-1c14d5086c97 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-10T07:06:19.754 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f0e5b41c-6509-4d93-87f1-e0f3327d4301/osd-block-abb08ecf-81bc-4ddf-8ab4-1c14d5086c97 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-10T07:06:20.226 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:20 vm05 ceph-mon[76604]: pgmap v126: 161 pgs: 7 peering, 20 stale+active+clean, 134 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:06:20.226 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:20 vm05 ceph-mon[76604]: Health check failed: Reduced data availability: 2 pgs inactive, 2 pgs peering (PG_AVAILABILITY) 2026-03-10T07:06:20.226 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:20 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:20.226 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:20 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/ln -snf /dev/ceph-f0e5b41c-6509-4d93-87f1-e0f3327d4301/osd-block-abb08ecf-81bc-4ddf-8ab4-1c14d5086c97 /var/lib/ceph/osd/ceph-7/block 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running 
command: /usr/bin/ln -snf /dev/ceph-f0e5b41c-6509-4d93-87f1-e0f3327d4301/osd-block-abb08ecf-81bc-4ddf-8ab4-1c14d5086c97 /var/lib/ceph/osd/ceph-7/block 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate[84530]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 bash[84519]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 conmon[84530]: conmon 436cea49402444a592b2 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0.scope/container/memory.events 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 podman[84519]: 2026-03-10 07:06:19.83317446 +0000 UTC m=+0.980280210 container died 436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 podman[84519]: 2026-03-10 07:06:19.850652153 +0000 UTC m=+0.997757894 container remove 436cea49402444a592b21f1d5d701638023108dff67cb533075d9a29999972f0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS) 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 podman[84768]: 2026-03-10 07:06:19.951476988 +0000 UTC m=+0.018822300 container create d45f9e33923c28e203e214aae9478ec9919f30631ad5361f1695b61c7e802871 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS) 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 podman[84768]: 2026-03-10 07:06:19.991548719 +0000 UTC m=+0.058894040 container init d45f9e33923c28e203e214aae9478ec9919f30631ad5361f1695b61c7e802871 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS) 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:19 vm05 podman[84768]: 2026-03-10 07:06:19.995307371 +0000 UTC m=+0.062652692 container start d45f9e33923c28e203e214aae9478ec9919f30631ad5361f1695b61c7e802871 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:20 vm05 bash[84768]: d45f9e33923c28e203e214aae9478ec9919f30631ad5361f1695b61c7e802871 
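The -activate container first tried raw activation and logged "Failed to activate via raw: did not find any matching OSD to activate", then fell back to `ceph-volume lvm activate`, which primed /var/lib/ceph/osd/ceph-7 from the logical volume and succeeded; only then was the long-running osd.7 unit started on the squid image. If an activation like this had to be inspected by hand on vm05, a sketch (the unit name and fsid are taken verbatim from the log; using `cephadm ceph-volume` as the wrapper for running ceph-volume inside the deployment container is an assumption about usage, not something the harness ran):

    # LVs ceph-volume knows about on this host, including osd.7's block device.
    cephadm ceph-volume -- lvm list
    # The systemd unit cephadm manages for this daemon, and its recent output.
    systemctl status ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service
    journalctl -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7 -n 50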
2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:20 vm05 podman[84768]: 2026-03-10 07:06:19.943189201 +0000 UTC m=+0.010534533 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:06:20.226 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:20 vm05 systemd[1]: Started Ceph osd.7 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:06:20.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[87980]: pgmap v126: 161 pgs: 7 peering, 20 stale+active+clean, 134 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[87980]: Health check failed: Reduced data availability: 2 pgs inactive, 2 pgs peering (PG_AVAILABILITY) 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[86149]: pgmap v126: 161 pgs: 7 peering, 20 stale+active+clean, 134 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[86149]: Health check failed: Reduced data availability: 2 pgs inactive, 2 pgs peering (PG_AVAILABILITY) 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:20.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:20 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:21.137 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:20 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:06:20.822+0000 7f642ab4f740 -1 Falling back to public interface 2026-03-10T07:06:21.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:21 vm05 ceph-mon[76604]: Health check failed: Degraded data redundancy: 26/630 objects degraded (4.127%), 10 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:21.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:21 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:21.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:21 vm02 ceph-mon[87980]: Health check failed: Degraded data redundancy: 26/630 objects degraded (4.127%), 10 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:21.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:21 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:21.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:21 vm02 ceph-mon[86149]: Health check failed: Degraded data redundancy: 26/630 objects degraded (4.127%), 10 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:21.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:21 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:21.932 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:21 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:06:21.664+0000 7f642ab4f740 -1 osd.7 0 read_superblock omap replica is 
missing. 2026-03-10T07:06:21.932 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:21 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:06:21.708+0000 7f642ab4f740 -1 osd.7 121 log_to_monitors true 2026-03-10T07:06:22.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:06:21] "GET /metrics HTTP/1.1" 200 38108 "" "Prometheus/2.51.0" 2026-03-10T07:06:22.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[87980]: pgmap v128: 161 pgs: 13 active+undersized, 7 peering, 14 stale+active+clean, 10 active+undersized+degraded, 117 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 26/630 objects degraded (4.127%) 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[87980]: from='osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[87980]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[86149]: pgmap v128: 161 pgs: 13 active+undersized, 7 peering, 14 stale+active+clean, 10 active+undersized+degraded, 117 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 26/630 objects degraded (4.127%) 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[86149]: from='osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T07:06:22.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:22 vm02 ceph-mon[86149]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T07:06:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:22 vm05 ceph-mon[76604]: pgmap v128: 161 pgs: 13 active+undersized, 7 peering, 14 stale+active+clean, 10 active+undersized+degraded, 117 active+clean; 457 KiB data, 244 MiB used, 160 GiB / 160 GiB avail; 26/630 objects degraded (4.127%) 2026-03-10T07:06:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:22 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
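The OSD_DOWN, PG_AVAILABILITY and PG_DEGRADED checks raised and cleared around this restart are the expected transient noise of cycling one OSD at a time, and OSD_UPGRADE_FINISHED only says that the osdmap's require_osd_release flag still reads the pre-upgrade value while every OSD binary is already squid; cephadm bumps that flag when the upgrade completes. A short sketch of the checks one might run at that point (standard ceph CLI; the manual require-osd-release step is shown only as the equivalent of what the orchestrator does on its own):

    # Current minimum-release flag in the osdmap (still the pre-upgrade value here).
    ceph osd dump | grep require_osd_release
    # Once the remaining 17.2.0 daemons (the rgw and iscsi services in the
    # versions output above) are redeployed, the cluster should collapse to a
    # single version string:
    ceph versions | jq -e '.overall | length == 1'
    ceph health detail
    # Manual equivalent of the final step cephadm performs itself:
    ceph osd require-osd-release squid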
2026-03-10T07:06:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:22 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:22 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:22 vm05 ceph-mon[76604]: from='osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T07:06:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:22 vm05 ceph-mon[76604]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T07:06:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: osdmap e124: 8 total, 7 up, 8 in 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.004 
INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all osd 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 
192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T07:06:24.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:23 vm05 ceph-mon[76604]: pgmap v130: 161 pgs: 36 active+undersized, 7 peering, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 82/630 objects degraded (13.016%) 2026-03-10T07:06:24.004 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:06:23 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:06:23.642+0000 7f64220f9640 -1 osd.7 121 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: osdmap e124: 8 total, 7 up, 8 in 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:24.085 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: Upgrade: Setting container_image for all osd 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 
192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[87980]: pgmap v130: 161 pgs: 36 active+undersized, 7 peering, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 82/630 objects degraded (13.016%) 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: osdmap e124: 8 total, 7 up, 8 in 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: 
from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all osd 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: Upgrade: Setting require_osd_release to 19 squid 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-10T07:06:24.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:23 vm02 ceph-mon[86149]: pgmap v130: 161 pgs: 36 active+undersized, 7 peering, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 82/630 objects degraded (13.016%) 2026-03-10T07:06:24.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule 
failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.7\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346] boot 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: osdmap e125: 8 total, 8 up, 8 in 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: Upgrade: Setting container_image for all mds 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: Upgrade: Updating rgw.foo.vm02.kkmsll (1/4) 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 
07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[87980]: Deploying daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346] boot 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: osdmap e125: 8 total, 8 up, 8 in 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all mds 2026-03-10T07:06:24.674 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.675 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: Upgrade: Updating rgw.foo.vm02.kkmsll (1/4) 2026-03-10T07:06:24.675 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:24.675 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:24.675 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:24.675 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:24 vm02 ceph-mon[86149]: Deploying daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: Health check cleared: 
OSD_DOWN (was: 1 osds down) 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: osd.7 [v2:192.168.123.105:6824/1739103346,v1:192.168.123.105:6825/1739103346] boot 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: osdmap e125: 8 total, 8 up, 8 in 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all mds 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: Upgrade: Updating rgw.foo.vm02.kkmsll (1/4) 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.kkmsll", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:25.005 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:24 vm05 ceph-mon[76604]: Deploying daemon rgw.foo.vm02.kkmsll on vm02 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[87980]: OSD bench result of 27291.490917 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[87980]: pgmap v132: 161 pgs: 43 active+undersized, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 82/630 objects degraded (13.016%) 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[87980]: osdmap e126: 8 total, 8 up, 8 in 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[86149]: OSD bench result of 27291.490917 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[86149]: pgmap v132: 161 pgs: 43 active+undersized, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 82/630 objects degraded (13.016%) 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:06:25.979 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:25 vm02 ceph-mon[86149]: osdmap e126: 8 total, 8 up, 8 in 2026-03-10T07:06:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:25 vm05 ceph-mon[76604]: OSD bench result of 27291.490917 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
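The OSD bench warning repeated above is informational: because the measured 27291 IOPS falls outside the configured threshold range, osd.7 keeps its unchanged 315 IOPS mclock capacity. The message itself recommends benchmarking the device externally (e.g. with fio) and overriding osd_mclock_max_capacity_iops_[hdd|ssd]. A minimal sketch of that override, assuming the ceph CLI is available on an admin host; the 500 IOPS figure is purely illustrative and would come from the external benchmark:

import subprocess

def set_osd_iops_capacity(osd_id: int, iops: float) -> None:
    """Override the mclock IOPS capacity for one HDD-class OSD (value assumed measured externally)."""
    subprocess.run(
        ["ceph", "config", "set", f"osd.{osd_id}",
         "osd_mclock_max_capacity_iops_hdd", str(iops)],
        check=True,
    )

# Example: apply a value obtained from a fio run against osd.7's device (illustrative number).
set_osd_iops_capacity(7, 500.0)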
2026-03-10T07:06:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:25 vm05 ceph-mon[76604]: pgmap v132: 161 pgs: 43 active+undersized, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 82/630 objects degraded (13.016%) 2026-03-10T07:06:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:25 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:25 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:06:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:25 vm05 ceph-mon[76604]: osdmap e126: 8 total, 8 up, 8 in 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 2 pgs peering) 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: Upgrade: Updating rgw.smpl.vm02.kyvfxo (2/4) 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: Deploying daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T07:06:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[86149]: Health check update: Degraded data redundancy: 82/630 objects degraded (13.016%), 26 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 2 pgs peering) 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:26.836 
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: Upgrade: Updating rgw.smpl.vm02.kyvfxo (2/4) 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: Deploying daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T07:06:26.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:26 vm02 ceph-mon[87980]: Health check update: Degraded data redundancy: 82/630 objects degraded (13.016%), 26 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 2 pgs peering) 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: Upgrade: Updating rgw.smpl.vm02.kyvfxo (2/4) 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm02.kyvfxo", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: Deploying daemon rgw.smpl.vm02.kyvfxo on vm02 2026-03-10T07:06:26.948 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:26 vm05 ceph-mon[76604]: Health check update: Degraded data redundancy: 82/630 objects degraded (13.016%), 26 pgs degraded (PG_DEGRADED) 2026-03-10T07:06:27.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: 
CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:27.743 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:27 vm02 ceph-mon[87980]: pgmap v134: 161 pgs: 18 active+undersized, 12 active+undersized+degraded, 131 active+clean; 457 KiB data, 270 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 170 B/s wr, 56 op/s; 50/630 objects degraded (7.937%) 2026-03-10T07:06:27.744 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:27 vm02 ceph-mon[86149]: pgmap v134: 161 pgs: 18 active+undersized, 12 active+undersized+degraded, 131 active+clean; 457 KiB data, 270 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 170 B/s wr, 56 op/s; 50/630 objects degraded (7.937%) 2026-03-10T07:06:27.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:27 vm05 ceph-mon[76604]: pgmap v134: 161 pgs: 18 active+undersized, 12 active+undersized+degraded, 131 active+clean; 457 KiB data, 270 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 170 B/s wr, 56 op/s; 50/630 objects degraded (7.937%) 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: Upgrade: Updating rgw.foo.vm05.bmslvs (3/4) 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[87980]: Deploying daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 
vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[86149]: Upgrade: Updating rgw.foo.vm05.bmslvs (3/4) 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:29.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:28 vm02 ceph-mon[86149]: Deploying daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: Upgrade: Updating rgw.foo.vm05.bmslvs (3/4) 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.bmslvs", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:29.093 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:28 vm05 ceph-mon[76604]: Deploying daemon rgw.foo.vm05.bmslvs on vm05 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 270 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 159 B/s wr, 104 op/s 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 50/630 objects degraded (7.937%), 12 pgs degraded) 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: Cluster is now healthy 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 
ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:30.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:30 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:30.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 270 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 159 B/s wr, 104 op/s 2026-03-10T07:06:30.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 50/630 objects degraded (7.937%), 12 pgs degraded) 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: Cluster is now healthy 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 270 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 159 B/s wr, 104 op/s 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 50/630 objects degraded (7.937%), 12 pgs degraded) 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: Cluster is now healthy 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.xjafam", "caps": ["mon", "allow *", "mgr", 
"allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-10T07:06:30.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:30 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:31.737 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:31 vm05 ceph-mon[76604]: Upgrade: Updating rgw.smpl.vm05.xjafam (4/4) 2026-03-10T07:06:31.737 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:31 vm05 ceph-mon[76604]: Deploying daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T07:06:31.737 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:31 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:31.737 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:31 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:31.834 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:31 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:06:31] "GET /metrics HTTP/1.1" 200 37978 "" "Prometheus/2.51.0" 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[87980]: Upgrade: Updating rgw.smpl.vm05.xjafam (4/4) 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[87980]: Deploying daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[86149]: Upgrade: Updating rgw.smpl.vm05.xjafam (4/4) 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[86149]: Deploying daemon rgw.smpl.vm05.xjafam on vm05 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:31.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:31 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:32.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:32 vm05 ceph-mon[76604]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 125 KiB/s rd, 127 B/s wr, 193 op/s 2026-03-10T07:06:32.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:32 vm02 ceph-mon[86149]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 125 KiB/s rd, 127 B/s wr, 193 op/s 2026-03-10T07:06:32.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:32 vm02 ceph-mon[87980]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 278 MiB used, 160 GiB / 160 GiB avail; 125 KiB/s rd, 127 B/s wr, 193 op/s 2026-03-10T07:06:33.632 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.633 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.896 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.896 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:33.897 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:33 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:33 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: 
ts=2026-03-10T07:06:34.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 286 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 460 B/s wr, 385 op/s 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": 
"config dump", "format": "json"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm02.kkmsll"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm02.kkmsll"}]': finished 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.bmslvs"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.bmslvs"}]': finished 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm02.kyvfxo"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm02.kyvfxo"}]': finished 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.xjafam"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.xjafam"}]': finished 
2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:06:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 286 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 460 B/s wr, 385 op/s 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: 
from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm02.kkmsll"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm02.kkmsll"}]': finished 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.bmslvs"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.bmslvs"}]': finished 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm02.kyvfxo"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm02.kyvfxo"}]': finished 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.xjafam"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.xjafam"}]': finished 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 
cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:06:34.836 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:34 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 286 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 460 B/s wr, 385 op/s 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 
10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm02.kkmsll"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm02.kkmsll"}]': finished 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.bmslvs"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.bmslvs"}]': finished 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm02.kyvfxo"}]: dispatch 2026-03-10T07:06:35.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm02.kyvfxo"}]': finished 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.xjafam"}]: dispatch 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.xjafam"}]': finished 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 
192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm02.iphfbm", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-10T07:06:35.004 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:34 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Detected new or changed devices on vm05 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Detected new or changed devices on vm02 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all rgw 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Upgrade: Updating iscsi.foo.vm02.iphfbm 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: Deploying daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 222 KiB/s rd, 409 B/s wr, 342 op/s 2026-03-10T07:06:36.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:35 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:36.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Detected new or changed devices on vm05 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Detected new or changed devices on vm02 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Upgrade: Setting container_image for all rgw 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Upgrade: Setting 
container_image for all ceph-exporter 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Upgrade: Updating iscsi.foo.vm02.iphfbm 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: Deploying daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 222 KiB/s rd, 409 B/s wr, 342 op/s 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Detected new or changed devices on vm05 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Detected new or changed devices on vm02 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all rgw 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all rbd-mirror 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Upgrade: Setting container_image for all ceph-exporter 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Upgrade: Updating iscsi.foo.vm02.iphfbm 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: Deploying daemon iscsi.foo.vm02.iphfbm on vm02 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 222 KiB/s rd, 409 B/s wr, 342 op/s 2026-03-10T07:06:36.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:35 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:36.947 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:36 vm05 ceph-mon[76604]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:37.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:36 vm02 ceph-mon[87980]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:37.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:36 vm02 ceph-mon[86149]: from='client.15180 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:06:37.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 
5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T07:06:38.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:37 vm05 ceph-mon[76604]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 220 KiB/s rd, 345 B/s wr, 339 op/s
2026-03-10T07:06:38.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:37 vm02 ceph-mon[87980]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 220 KiB/s rd, 345 B/s wr, 339 op/s
2026-03-10T07:06:38.084 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:37 vm02 ceph-mon[86149]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 220 KiB/s rd, 345 B/s wr, 339 op/s
2026-03-10T07:06:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:40 vm05 ceph-mon[76604]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 198 KiB/s rd, 255 B/s wr, 306 op/s
2026-03-10T07:06:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y'
2026-03-10T07:06:40.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T07:06:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:40 vm02 ceph-mon[87980]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 198 KiB/s rd, 255 B/s wr, 306 op/s
2026-03-10T07:06:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y'
2026-03-10T07:06:40.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T07:06:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:40 vm02 ceph-mon[86149]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 198 KiB/s rd, 255 B/s wr, 306 op/s
2026-03-10T07:06:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y'
2026-03-10T07:06:40.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T07:06:40.871 INFO:teuthology.orchestra.run.vm02.stdout:true
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM
VERSION IMAGE ID CONTAINER ID
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (3m) 8s ago 8m 22.5M - 0.25.0 c8568f914cd2 520cbcc5ad98
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (3m) 8s ago 7m 46.4M - dad864ee21e9 34567dcb4b51
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 running (3m) 8s ago 7m 52.4M - 3.5 e1d6a67b021e a06caff18850
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (3m) 8s ago 9m 487M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (3m) 8s ago 10m 562M - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (3m) 8s ago 10m 51.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (3m) 8s ago 9m 40.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (3m) 8s ago 9m 42.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (3m) 8s ago 8m 9525k - 1.7.0 72c9c2088986 0bc0b34c732a
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (3m) 8s ago 8m 9.91M - 1.7.0 72c9c2088986 0129ed456f9d
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (2m) 8s ago 9m 74.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (2m) 8s ago 9m 54.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (2m) 8s ago 9m 49.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (108s) 8s ago 9m 75.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 396c050a0b4f
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (85s) 8s ago 8m 54.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 05476fb1f9d8
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (64s) 8s ago 8m 71.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 37ab69a8d103
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (42s) 8s ago 8m 47.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e affae9a808f3
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (21s) 8s ago 8m 69.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d45f9e33923c
2026-03-10T07:06:41.288 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (3m) 8s ago 8m 44.3M - 2.51.0 1d3b7f56885b 2f14ff887bc7
2026-03-10T07:06:41.289 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (15s) 8s ago 7m 100M - 19.2.3-678-ge911bdeb 654f31e6858e c3e18d1a8070
2026-03-10T07:06:41.289 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (11s) 8s ago 7m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 2f5744dcd331
2026-03-10T07:06:41.289 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (13s) 8s ago 7m 99.1M - 19.2.3-678-ge911bdeb 654f31e6858e cec8929a3f84
2026-03-10T07:06:41.289 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (10s) 8s ago 7m 54.1M - 19.2.3-678-ge911bdeb 654f31e6858e 6b376bc4bc8f
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "mon": {
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": {
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "osd": {
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": {
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: },
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "overall": {
2026-03-10T07:06:41.522 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17
2026-03-10T07:06:41.523 INFO:teuthology.orchestra.run.vm02.stdout: }
2026-03-10T07:06:41.523 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true,
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "mgr",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "rgw",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "osd",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "mon"
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: ],
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "17/23 daemons upgraded",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Currently upgrading iscsi daemons",
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": false
2026-03-10T07:06:41.726 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T07:06:41.963 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:06:41] "GET /metrics HTTP/1.1" 200 38179 "" "Prometheus/2.51.0"
2026-03-10T07:06:41.963 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T07:06:42.334
INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[87980]: from='client.34422 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[87980]: from='client.44338 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.334 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[87980]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 180 KiB/s rd, 255 B/s wr, 279 op/s 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[87980]: from='client.44344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/823555040' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/2368800123' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[86149]: from='client.34422 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[86149]: from='client.44338 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[86149]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 180 KiB/s rd, 255 B/s wr, 279 op/s 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[86149]: from='client.44344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/823555040' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:42.335 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:42 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/2368800123' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:06:42.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:42 vm05 ceph-mon[76604]: from='client.34422 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:42 vm05 ceph-mon[76604]: from='client.44338 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:42 vm05 ceph-mon[76604]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 180 KiB/s rd, 255 B/s wr, 279 op/s 2026-03-10T07:06:42.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:42 vm05 ceph-mon[76604]: from='client.44344 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:42.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:42 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/823555040' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:06:42.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:42 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/2368800123' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:06:43.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:43 vm05 ceph-mon[76604]: from='client.44353 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:43.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:43 vm02 ceph-mon[87980]: from='client.44353 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:43.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:43 vm02 ceph-mon[86149]: from='client.44353 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:06:44.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:44 vm05 ceph-mon[76604]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 135 KiB/s rd, 255 B/s wr, 206 op/s 2026-03-10T07:06:44.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-10T07:06:44.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:44 vm02 ceph-mon[87980]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 135 KiB/s rd, 255 B/s wr, 206 op/s
2026-03-10T07:06:44.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:44 vm02 ceph-mon[86149]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 135 KiB/s rd, 255 B/s wr, 206 op/s
2026-03-10T07:06:45.459 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: 2026-03-10T07:06:45.303+0000 7f03b321a640 -1 log_channel(cephadm) log [ERR] : Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed.
2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 50 op/s
2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed.
2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 59 op/s 2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.319 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 50 op/s 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 59 op/s 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.547 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:46 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 50 op/s 2026-03-10T07:06:46.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 
2026-03-10T07:06:46.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 59 op/s 2026-03-10T07:06:46.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.574 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:46.575 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:46 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: Health check failed: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: Health check failed: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:47.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: Health check failed: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-10T07:06:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:06:47.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[87980]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 523 B/s rd, 0 op/s 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[87980]: Checking dashboard <-> RGW credentials 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[87980]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1017 B/s rd, 0 op/s 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[87980]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[87980]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[86149]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 523 B/s rd, 0 op/s 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[86149]: Checking dashboard <-> RGW credentials 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[86149]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1017 B/s rd, 0 op/s 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[86149]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 
294 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:06:48.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:48 vm02 ceph-mon[86149]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T07:06:48.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:48 vm05 ceph-mon[76604]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 523 B/s rd, 0 op/s 2026-03-10T07:06:48.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:48 vm05 ceph-mon[76604]: Checking dashboard <-> RGW credentials 2026-03-10T07:06:48.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:48 vm05 ceph-mon[76604]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1017 B/s rd, 0 op/s 2026-03-10T07:06:48.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:48 vm05 ceph-mon[76604]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:06:48.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:48 vm05 ceph-mon[76604]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T07:06:50.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:50 vm02 ceph-mon[87980]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T07:06:50.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:50 vm02 ceph-mon[86149]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T07:06:50.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:50 vm05 ceph-mon[76604]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-10T07:06:52.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:06:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:06:51] "GET /metrics HTTP/1.1" 200 38179 "" "Prometheus/2.51.0" 2026-03-10T07:06:52.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:52 vm05 ceph-mon[76604]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1 op/s 2026-03-10T07:06:52.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:52 vm02 ceph-mon[87980]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1 op/s 2026-03-10T07:06:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:52 vm02 ceph-mon[86149]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1 op/s 2026-03-10T07:06:54.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:54 vm05 ceph-mon[76604]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 3 op/s 2026-03-10T07:06:54.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon 
}} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:54.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:54 vm02 ceph-mon[87980]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 3 op/s 2026-03-10T07:06:54.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:54 vm02 ceph-mon[86149]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 3 op/s 2026-03-10T07:06:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:56 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:06:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:56 vm05 ceph-mon[76604]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 3 op/s 2026-03-10T07:06:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:56 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/1681400414' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:06:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:56 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/1936171693' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]: dispatch 2026-03-10T07:06:56.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:56 vm05 ceph-mon[76604]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]: dispatch 2026-03-10T07:06:56.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[86149]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 3 op/s 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/1681400414' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/1936171693' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[87980]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.0 KiB/s rd, 3 op/s 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/1681400414' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/1936171693' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]: dispatch 2026-03-10T07:06:56.586 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:56 vm02 ceph-mon[87980]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]: dispatch 2026-03-10T07:06:57.104 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:06:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:06:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:06:57.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:57 vm05 ceph-mon[76604]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]': finished 2026-03-10T07:06:57.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:57 vm05 ceph-mon[76604]: osdmap e127: 8 total, 8 up, 8 in 2026-03-10T07:06:57.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:57 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/3685657629' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]: dispatch 2026-03-10T07:06:57.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:57 vm05 ceph-mon[76604]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]: dispatch 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[87980]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]': finished 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[87980]: osdmap e127: 8 total, 8 up, 8 in 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3685657629' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]: dispatch 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[87980]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]: dispatch 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6801/2472641779"}]': finished 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[86149]: osdmap e127: 8 total, 8 up, 8 in 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3685657629' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]: dispatch 2026-03-10T07:06:57.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:57 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]: dispatch 2026-03-10T07:06:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:58 vm05 ceph-mon[76604]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]': finished 2026-03-10T07:06:58.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:58 vm05 ceph-mon[76604]: osdmap e128: 8 total, 8 up, 8 in 2026-03-10T07:06:58.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:58 vm05 ceph-mon[76604]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T07:06:58.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:58 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/3381596939' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1311453216"}]: dispatch 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[87980]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]': finished 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[87980]: osdmap e128: 8 total, 8 up, 8 in 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[87980]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3381596939' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1311453216"}]: dispatch 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[86149]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1143934037"}]': finished 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[86149]: osdmap e128: 8 total, 8 up, 8 in 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[86149]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 2.1 KiB/s rd, 2 op/s 2026-03-10T07:06:58.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:58 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3381596939' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1311453216"}]: dispatch 2026-03-10T07:06:59.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:59 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/3381596939' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1311453216"}]': finished 2026-03-10T07:06:59.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:59 vm05 ceph-mon[76604]: osdmap e129: 8 total, 8 up, 8 in 2026-03-10T07:06:59.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:59 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/1025725279' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]: dispatch 2026-03-10T07:06:59.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:06:59 vm05 ceph-mon[76604]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]: dispatch 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3381596939' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1311453216"}]': finished 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[87980]: osdmap e129: 8 total, 8 up, 8 in 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/1025725279' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]: dispatch 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[87980]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]: dispatch 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3381596939' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1311453216"}]': finished 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[86149]: osdmap e129: 8 total, 8 up, 8 in 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[86149]: from='client.? 
192.168.123.102:0/1025725279' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]: dispatch 2026-03-10T07:06:59.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:06:59 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]: dispatch 2026-03-10T07:07:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:00 vm05 ceph-mon[76604]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-10T07:07:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:00 vm05 ceph-mon[76604]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]': finished 2026-03-10T07:07:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:00 vm05 ceph-mon[76604]: osdmap e130: 8 total, 8 up, 8 in 2026-03-10T07:07:00.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:00 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/2142404750' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1763277220"}]: dispatch 2026-03-10T07:07:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[87980]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-10T07:07:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[87980]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]': finished 2026-03-10T07:07:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[87980]: osdmap e130: 8 total, 8 up, 8 in 2026-03-10T07:07:00.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/2142404750' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1763277220"}]: dispatch 2026-03-10T07:07:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[86149]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-10T07:07:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/905837153"}]': finished 2026-03-10T07:07:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[86149]: osdmap e130: 8 total, 8 up, 8 in 2026-03-10T07:07:00.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:00 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/2142404750' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1763277220"}]: dispatch 2026-03-10T07:07:01.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:01 vm05 ceph-mon[76604]: from='client.? 
192.168.123.102:0/2142404750' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1763277220"}]': finished 2026-03-10T07:07:01.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:01 vm05 ceph-mon[76604]: osdmap e131: 8 total, 8 up, 8 in 2026-03-10T07:07:01.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:01 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/3158234064' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]: dispatch 2026-03-10T07:07:01.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:01 vm05 ceph-mon[76604]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]: dispatch 2026-03-10T07:07:01.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/2142404750' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1763277220"}]': finished 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[87980]: osdmap e131: 8 total, 8 up, 8 in 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3158234064' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]: dispatch 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[87980]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]: dispatch 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/2142404750' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:0/1763277220"}]': finished 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[86149]: osdmap e131: 8 total, 8 up, 8 in 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3158234064' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]: dispatch 2026-03-10T07:07:01.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:01 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]: dispatch 2026-03-10T07:07:02.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:07:01 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:07:01] "GET /metrics HTTP/1.1" 200 38334 "" "Prometheus/2.51.0" 2026-03-10T07:07:02.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:02 vm05 ceph-mon[76604]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:07:02.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:02 vm05 ceph-mon[76604]: from='client.? 
' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]': finished 2026-03-10T07:07:02.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:02 vm05 ceph-mon[76604]: osdmap e132: 8 total, 8 up, 8 in 2026-03-10T07:07:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:02 vm02 ceph-mon[87980]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:07:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:02 vm02 ceph-mon[87980]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]': finished 2026-03-10T07:07:02.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:02 vm02 ceph-mon[87980]: osdmap e132: 8 total, 8 up, 8 in 2026-03-10T07:07:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:02 vm02 ceph-mon[86149]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail 2026-03-10T07:07:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:02 vm02 ceph-mon[86149]: from='client.? ' entity='client.iscsi.foo.vm02.iphfbm' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.102:6800/2472641779"}]': finished 2026-03-10T07:07:02.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:02 vm02 ceph-mon[86149]: osdmap e132: 8 total, 8 up, 8 in 2026-03-10T07:07:04.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:04 vm05 ceph-mon[76604]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T07:07:04.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:04.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:04 vm02 ceph-mon[87980]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T07:07:04.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:04 vm02 ceph-mon[86149]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-10T07:07:06.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:06 vm02 ceph-mon[87980]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:06.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:06 vm02 ceph-mon[86149]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:06.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:06 vm05 ceph-mon[76604]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:07.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:06.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", 
job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:07.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:07 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:07.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:07 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:07.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:07 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:08.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:08 vm02 ceph-mon[87980]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:07:08.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:08 vm02 ceph-mon[86149]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:07:08.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:08 vm05 ceph-mon[76604]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:07:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:10 vm02 ceph-mon[87980]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:07:10.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:10.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:10 vm02 ceph-mon[86149]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:07:10.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:10 vm05 ceph-mon[76604]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-10T07:07:10.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:12.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:07:11 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:07:11] "GET /metrics HTTP/1.1" 200 38336 "" "Prometheus/2.51.0" 2026-03-10T07:07:12.196 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:07:12.413 INFO:teuthology.orchestra.run.vm02.stdout:"Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed." 
2026-03-10T07:07:12.449 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:12 vm02 ceph-mon[86149]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:07:12.450 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:12 vm02 ceph-mon[87980]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:07:12.487 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T07:07:12.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:12 vm05 ceph-mon[76604]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (4m) 26s ago 8m - - 0.25.0 c8568f914cd2 520cbcc5ad98 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (4m) 26s ago 8m 46.4M - dad864ee21e9 34567dcb4b51 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 unknown 26s ago 8m - - 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (3m) 26s ago 10m 487M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (4m) 26s ago 10m - - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (3m) 26s ago 11m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (3m) 26s ago 10m 41.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (3m) 26s ago 10m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (4m) 26s ago 8m - - 1.7.0 72c9c2088986 0bc0b34c732a 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (4m) 26s ago 8m 9.91M - 1.7.0 72c9c2088986 0129ed456f9d 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (3m) 26s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (3m) 26s ago 9m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (2m) 26s ago 9m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (2m) 26s ago 9m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 396c050a0b4f 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (117s) 26s ago 9m 55.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 05476fb1f9d8 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (95s) 26s ago 9m 71.8M 
4096M 19.2.3-678-ge911bdeb 654f31e6858e 37ab69a8d103 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (74s) 26s ago 9m 48.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e affae9a808f3 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (53s) 26s ago 9m 70.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d45f9e33923c 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (3m) 26s ago 8m 44.3M - 2.51.0 1d3b7f56885b 2f14ff887bc7 2026-03-10T07:07:13.051 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (47s) 26s ago 8m - - 19.2.3-678-ge911bdeb 654f31e6858e c3e18d1a8070 2026-03-10T07:07:13.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (43s) 26s ago 8m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 2f5744dcd331 2026-03-10T07:07:13.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (45s) 26s ago 8m - - 19.2.3-678-ge911bdeb 654f31e6858e cec8929a3f84 2026-03-10T07:07:13.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (41s) 26s ago 8m 98.1M - 19.2.3-678-ge911bdeb 654f31e6858e 6b376bc4bc8f 2026-03-10T07:07:13.122 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T07:07:13.318 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:13 vm02 ceph-mon[86149]: from='client.34524 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:13.318 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:13 vm02 ceph-mon[86149]: from='client.34530 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:13.318 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:13 vm02 ceph-mon[87980]: from='client.34524 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:13.318 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:13 vm02 ceph-mon[87980]: from='client.34530 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:07:13.691 
INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:07:13.691 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:07:13.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:13 vm05 ceph-mon[76604]: from='client.34524 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:13.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:13 vm05 ceph-mon[76604]: from='client.34530 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:13.764 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'echo "wait for servicemap items w/ changing names to refresh"' 2026-03-10T07:07:14.081 INFO:teuthology.orchestra.run.vm02.stdout:wait for servicemap items w/ changing names to refresh 2026-03-10T07:07:14.138 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 60' 2026-03-10T07:07:14.303 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:14 vm02 ceph-mon[87980]: from='client.34533 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:14.303 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:14 vm02 ceph-mon[86149]: from='client.34533 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:14.303 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:14 vm02 ceph-mon[86149]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:07:14.303 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:14 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/121150195' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:07:14.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:14 vm05 ceph-mon[76604]: from='client.34533 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:07:14.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:14 vm05 ceph-mon[76604]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:07:14.504 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:14 vm05 ceph-mon[76604]: from='client.? 
192.168.123.102:0/121150195' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:07:14.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:14 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:14.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:14 vm02 ceph-mon[87980]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-10T07:07:14.585 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:14 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/121150195' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:07:16.369 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:16 vm02 ceph-mon[86149]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:16.369 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:16 vm02 ceph-mon[87980]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:16.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:16 vm05 ceph-mon[76604]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:17.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:16 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:17.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:17 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:17.585 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:17 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:17.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:17 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:18.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:18 vm02 ceph-mon[87980]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:18.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:18 vm02 ceph-mon[86149]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:18.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:18 vm05 ceph-mon[76604]: pgmap v168: 161 pgs: 161 active+clean; 
457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:20.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:20 vm02 ceph-mon[87980]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:20.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:20 vm02 ceph-mon[86149]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:20.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:20 vm05 ceph-mon[76604]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:22.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:07:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:07:21] "GET /metrics HTTP/1.1" 200 38336 "" "Prometheus/2.51.0" 2026-03-10T07:07:22.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:22 vm02 ceph-mon[87980]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:22.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:22 vm02 ceph-mon[86149]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:22.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:22 vm05 ceph-mon[76604]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:24.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:24 vm05 ceph-mon[76604]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:24.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:24 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:24.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:24 vm02 ceph-mon[87980]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:24.584 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:24 vm02 ceph-mon[86149]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:25.732 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:25 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:25.732 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:25 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:25.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:25 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:26.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:26 vm05 ceph-mon[76604]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:26.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:26 vm02 ceph-mon[87980]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:26.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:26 vm02 ceph-mon[86149]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:27.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:26 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:26.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n 
description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:27.754 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:27 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:27.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:27 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:27.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:27 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:28.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:28 vm05 ceph-mon[76604]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:28.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:28 vm02 ceph-mon[87980]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:28.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:28 vm02 ceph-mon[86149]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:30.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:30 vm05 ceph-mon[76604]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:30.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:30 vm02 ceph-mon[87980]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:30.834 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:30 vm02 ceph-mon[86149]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:32.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:07:31 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:07:31] "GET /metrics HTTP/1.1" 200 38326 "" "Prometheus/2.51.0" 2026-03-10T07:07:32.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:32 vm05 ceph-mon[76604]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:32.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:32 vm02 ceph-mon[87980]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 
op/s 2026-03-10T07:07:32.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:32 vm02 ceph-mon[86149]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:34.503 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:34 vm05 ceph-mon[76604]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:34.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:34 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:34.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:34 vm02 ceph-mon[87980]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:34.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:34 vm02 ceph-mon[86149]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:36.753 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:36 vm05 ceph-mon[76604]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:36.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:36 vm02 ceph-mon[87980]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:36.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:36 vm02 ceph-mon[86149]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
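The repeated "Evaluating rule failed ... many-to-many matching not allowed" warnings above appear to be a side effect of the upgrade window itself: Prometheus is still seeing series from the pre-upgrade 17.2.0 mgr module target (instance="192.168.123.105:9283") while the upgraded 19.2.3 exporter publishes the same ceph_osd_metadata and node_uname_info series under instance="ceph_cluster", so the group_left joins in the bundled CephOSDFlapping and CephNodeDiskspaceWarning rules find two right-hand series per match group. A minimal way to confirm the duplication by hand is sketched below; it is not part of the test run, and the vm05:9095 endpoint is an assumption taken from the prometheus.a daemon listing further down in this log.

    # Sketch only, not executed by the test. Query the Prometheus HTTP API for the
    # conflicting metadata series of osd.0; endpoint and lack of auth are assumptions
    # for this particular deployment.
    curl -sG 'http://vm05:9095/api/v1/series' \
      --data-urlencode 'match[]=ceph_osd_metadata{ceph_daemon="osd.0"}' \
      | jq '.data[] | {instance, ceph_version}'

If both instances show up, the warnings should stop on their own once the stale pre-upgrade scrape target is dropped from the generated Prometheus configuration.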
2026-03-10T07:07:37.253 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:36 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:36.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:37.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:37 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:37.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:37 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:38.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:37 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:38.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:38 vm02 ceph-mon[87980]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:38.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:38 vm02 ceph-mon[86149]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:39.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:38 vm05 ceph-mon[76604]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:40.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:40 vm02 ceph-mon[87980]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:40.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:40 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:40.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:40 vm02 ceph-mon[86149]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-10T07:07:40.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:40 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:41.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:40 vm05 ceph-mon[76604]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:41.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:40 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:42.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:07:41 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:07:41] "GET /metrics HTTP/1.1" 200 38335 "" "Prometheus/2.51.0" 2026-03-10T07:07:42.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:42 vm02 ceph-mon[87980]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:42.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:42 vm02 ceph-mon[86149]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:43.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:42 vm05 ceph-mon[76604]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:44.504 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:44 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:44.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:44 vm02 ceph-mon[87980]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:44.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:44 vm02 ceph-mon[86149]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:45.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:44 vm05 ceph-mon[76604]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:46.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:46 vm05 ceph-mon[76604]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:46.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:46 vm02 ceph-mon[87980]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:46.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:46 vm02 ceph-mon[86149]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:07:47.254 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:46 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:46.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm02\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", 
nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm02\", job=\"node\", machine=\"x86_64\", nodename=\"vm02\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:07:47.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:47 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:07:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:47 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T07:07:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T07:07:48.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:47 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' 2026-03-10T07:07:48.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:48 vm02 ceph-mon[87980]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:48 vm02 ceph-mon[87980]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 990 B/s rd, 0 op/s 2026-03-10T07:07:48.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:48 vm02 ceph-mon[87980]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 
GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:48 vm02 ceph-mon[86149]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:48 vm02 ceph-mon[86149]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 990 B/s rd, 0 op/s 2026-03-10T07:07:48.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:48 vm02 ceph-mon[86149]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:48 vm05 ceph-mon[76604]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:48 vm05 ceph-mon[76604]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 990 B/s rd, 0 op/s 2026-03-10T07:07:49.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:48 vm05 ceph-mon[76604]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:49.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:07:48 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:07:48.846Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:07:50.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:50 vm02 ceph-mon[87980]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:50.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:50 vm02 ceph-mon[86149]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:51.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:50 vm05 ceph-mon[76604]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:52.084 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:07:51 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:07:51] "GET /metrics HTTP/1.1" 200 38335 "" "Prometheus/2.51.0" 2026-03-10T07:07:52.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:52 vm02 ceph-mon[87980]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 613 B/s rd, 0 op/s 2026-03-10T07:07:52.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:52 vm02 ceph-mon[86149]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 613 B/s rd, 0 op/s 2026-03-10T07:07:53.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:52 vm05 ceph-mon[76604]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 613 B/s rd, 0 op/s 2026-03-10T07:07:54.503 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:07:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:07:54.147Z caller=group.go:483 
level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"28bd35e6-1c4e-11f1-9057-21b3549603fc\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.102\", device_class=\"hdd\", hostname=\"vm02\", instance=\"192.168.123.105:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.102\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-10T07:07:54.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:54 vm02 ceph-mon[87980]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:54.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:54 vm02 ceph-mon[86149]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:55.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:54 vm05 ceph-mon[76604]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:07:56.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:55 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:56.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:55 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:56.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:55 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:07:57.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:56 vm05 ceph-mon[76604]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 613 B/s rd, 0 op/s 2026-03-10T07:07:57.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:56 vm02 ceph-mon[87980]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 
GiB / 160 GiB avail; 613 B/s rd, 0 op/s 2026-03-10T07:07:57.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:56 vm02 ceph-mon[86149]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 613 B/s rd, 0 op/s 2026-03-10T07:07:58.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:57 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:58.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:57 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:58.086 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:57 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:07:59.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:07:58 vm05 ceph-mon[76604]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:07:59.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:07:58 vm02 ceph-mon[87980]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:07:59.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:07:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:07:58.845Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:07:59.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:07:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:07:58.846Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:07:59.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:07:58 vm02 ceph-mon[86149]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-10T07:08:01.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:00 vm05 ceph-mon[76604]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:01.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:00 vm02 ceph-mon[87980]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:01.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:00 vm02 ceph-mon[86149]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:02.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:01 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:08:01] "GET /metrics HTTP/1.1" 200 38334 "" "Prometheus/2.51.0" 2026-03-10T07:08:03.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:02 vm05 ceph-mon[76604]: 
pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:03.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:02 vm02 ceph-mon[87980]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:03.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:02 vm02 ceph-mon[86149]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:04.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:03 vm05 ceph-mon[76604]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:04.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:03 vm02 ceph-mon[87980]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:04.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:03 vm02 ceph-mon[86149]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:06.834 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:06 vm02 ceph-mon[87980]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:06.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:06 vm02 ceph-mon[86149]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:07.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:06 vm05 ceph-mon[76604]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:07.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:06.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:08.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:07 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:08:08.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:07 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:08:08.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:07 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:08:08.846 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:08 vm02 ceph-mon[87980]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:08.847 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:08 vm02 ceph-mon[86149]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:09.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:08 vm05 ceph-mon[76604]: pgmap v195: 161 pgs: 161 active+clean; 457 
KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:09.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:08.846Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:09.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:08.848Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:10.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:09 vm05 ceph-mon[76604]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:10.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:09 vm02 ceph-mon[87980]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:10.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:09 vm02 ceph-mon[86149]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:11.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:10 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:08:11.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:10 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:08:11.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:10 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:08:12.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:11 vm05 ceph-mon[76604]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:12.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:11 vm02 ceph-mon[87980]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:12.085 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:11 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:08:11] "GET /metrics HTTP/1.1" 200 38334 "" "Prometheus/2.51.0" 2026-03-10T07:08:12.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:11 vm02 ceph-mon[86149]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:14.508 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e 
sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-10T07:08:14.721 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:14 vm02 ceph-mon[86149]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:14.721 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:14 vm02 ceph-mon[87980]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:15.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:14 vm05 ceph-mon[76604]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.a vm02 *:9093,9094 running (5m) 88s ago 9m - - 0.25.0 c8568f914cd2 520cbcc5ad98 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:grafana.a vm05 *:3000 running (5m) 88s ago 9m 46.4M - dad864ee21e9 34567dcb4b51 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:iscsi.foo.vm02.iphfbm vm02 unknown 88s ago 9m - - 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:mgr.x vm05 *:8443,9283,8765 running (4m) 88s ago 11m 487M - 19.2.3-678-ge911bdeb 654f31e6858e cdd4c8db2d43 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:mgr.y vm02 *:8443,9283,8765 running (5m) 88s ago 12m - - 19.2.3-678-ge911bdeb 654f31e6858e e1dc22afcf31 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:mon.a vm02 running (4m) 88s ago 12m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 194e84dd73c4 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:mon.b vm05 running (4m) 88s ago 11m 41.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e e901cead026d 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:mon.c vm02 running (4m) 88s ago 11m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 959c5054b3d9 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.a vm02 *:9100 running (5m) 88s ago 9m - - 1.7.0 72c9c2088986 0bc0b34c732a 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.b vm05 *:9100 running (5m) 88s ago 9m 9.91M - 1.7.0 72c9c2088986 0129ed456f9d 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (4m) 88s ago 11m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e bf86ac25f7fb 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm02 running (4m) 88s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 6a939dbe6adc 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (3m) 88s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e b0f12ddcd6b3 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm02 running (3m) 88s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 396c050a0b4f 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm05 running (2m) 88s ago 10m 55.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 05476fb1f9d8 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm05 running (2m) 88s ago 10m 71.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 37ab69a8d103 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm05 running (2m) 88s ago 10m 48.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e affae9a808f3 2026-03-10T07:08:15.079 
INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm05 running (115s) 88s ago 10m 70.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d45f9e33923c 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.a vm05 *:9095 running (5m) 88s ago 9m 44.3M - 2.51.0 1d3b7f56885b 2f14ff887bc7 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.kkmsll vm02 *:8000 running (109s) 88s ago 9m - - 19.2.3-678-ge911bdeb 654f31e6858e c3e18d1a8070 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm05.bmslvs vm05 *:8000 running (105s) 88s ago 9m 100M - 19.2.3-678-ge911bdeb 654f31e6858e 2f5744dcd331 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm02.kyvfxo vm02 *:80 running (107s) 88s ago 9m - - 19.2.3-678-ge911bdeb 654f31e6858e cec8929a3f84 2026-03-10T07:08:15.079 INFO:teuthology.orchestra.run.vm02.stdout:rgw.smpl.vm05.xjafam vm05 *:80 running (103s) 88s ago 9m 98.1M - 19.2.3-678-ge911bdeb 654f31e6858e 6b376bc4bc8f 2026-03-10T07:08:15.137 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout: "mon": { 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout: "mgr": { 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-10T07:08:15.754 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: "osd": { 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: "rgw": { 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: }, 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: "overall": { 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout: } 2026-03-10T07:08:15.755 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:08:15.843 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "target_image": 
"quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "in_progress": true, 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "services_complete": [ 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "mgr", 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "rgw", 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "osd", 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "mon" 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: ], 2026-03-10T07:08:16.440 INFO:teuthology.orchestra.run.vm02.stdout: "progress": "17/23 daemons upgraded", 2026-03-10T07:08:16.441 INFO:teuthology.orchestra.run.vm02.stdout: "message": "Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed.", 2026-03-10T07:08:16.441 INFO:teuthology.orchestra.run.vm02.stdout: "is_paused": true 2026-03-10T07:08:16.441 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:08:16.529 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-10T07:08:16.722 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:16 vm02 ceph-mon[87980]: from='client.34542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:16.722 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:16 vm02 ceph-mon[87980]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:16.722 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:16 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3350717774' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:16.722 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:16 vm02 ceph-mon[86149]: from='client.34542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:16.722 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:16 vm02 ceph-mon[86149]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:16.722 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:16 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3350717774' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:16.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:16 vm05 ceph-mon[76604]: from='client.34542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:16.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:16 vm05 ceph-mon[76604]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:16.805 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:16 vm05 ceph-mon[76604]: from='client.? 
192.168.123.102:0/3350717774' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:17.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:16 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:16.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:17.085 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:16 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:16.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s); Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout: daemon iscsi.foo.vm02.iphfbm on vm02 is in unknown state 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm02.iphfbm on host vm02 failed. 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout: Upgrade daemon: iscsi.foo.vm02.iphfbm: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm02.iphfbm ... 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:Creating ceph-iscsi config... 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:Write file: /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/iscsi.foo.vm02.iphfbm/iscsi-gateway.cfg 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:Write file: /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/iscsi.foo.vm02.iphfbm/tcmu-runner-entrypoint.sh 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:Failed to trim old cgroups /sys/fs/cgroup/system.slice/system-ceph\x2d28bd35e6\x2d1c4e\x2d11f1\x2d9057\x2d21b3549603fc.slice/ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl start ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Job for ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service failed because the control process exited with error code. 2026-03-10T07:08:17.197 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr See "systemctl status ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service" and "journalctl -xeu ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service" for details. 
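The `ceph orch upgrade status` and `ceph health detail` output above shows the upgrade paused ("is_paused": true) after UPGRADE_REDEPLOY_DAEMON: systemd could not start the redeployed iscsi.foo.vm02.iphfbm unit on vm02, and the traceback that follows is cephadm surfacing that systemctl failure. Outside of this automated run, a manual follow-up might look like the sketch below; it only reuses the unit name and diagnostic commands already printed in this health output plus the standard orchestrator retry commands, and is not something the test itself executes.

    # Sketch only. Inspect the failed unit on vm02 (unit name copied from the
    # UPGRADE_REDEPLOY_DAEMON detail above):
    sudo systemctl status ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service
    sudo journalctl -xeu ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service
    # Once the underlying cause is fixed, retry the daemon and un-pause the upgrade
    # (run from a cephadm shell or any host with the admin keyring):
    ceph orch daemon redeploy iscsi.foo.vm02.iphfbm
    ceph orch upgrade resume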
2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout:Traceback (most recent call last): 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: return _run_code(code, main_globals, None, 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: exec(code, run_globals) 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1090, in deploy_daemon 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1237, in deploy_daemon_units 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout:RuntimeError: Failed command: systemctl start ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm: Job for ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service failed because the control process exited with error code. 2026-03-10T07:08:17.198 INFO:teuthology.orchestra.run.vm02.stdout:See "systemctl status ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service" and "journalctl -xeu ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@iscsi.foo.vm02.iphfbm.service" for details. 2026-03-10T07:08:17.270 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 1'"'"'' 2026-03-10T07:08:17.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:17 vm02 ceph-mon[87980]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:08:17.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:17 vm02 ceph-mon[87980]: from='client.34551 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:17.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:17 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/776998594' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:08:17.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:17 vm02 ceph-mon[86149]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:08:17.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:17 vm02 ceph-mon[86149]: from='client.34551 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:17.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:17 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/776998594' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:08:17.841 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:17 vm05 ceph-mon[76604]: from='client.34488 -' entity='client.iscsi.foo.vm02.iphfbm' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-10T07:08:17.841 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:17 vm05 ceph-mon[76604]: from='client.34551 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:17.841 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:17 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/776998594' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T07:08:17.916 INFO:teuthology.orchestra.run.vm02.stdout:true 2026-03-10T07:08:17.980 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | keys'"'"' | grep $sha1' 2026-03-10T07:08:18.667 INFO:teuthology.orchestra.run.vm02.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-10T07:08:18.718 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls | grep '"'"'^osd '"'"'' 2026-03-10T07:08:18.747 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:18 vm02 ceph-mon[86149]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:18.747 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:18 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/1754499783' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:18.747 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:18 vm02 ceph-mon[87980]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:18.747 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:18 vm02 ceph-mon[87980]: from='client.? 
192.168.123.102:0/1754499783' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:18.865 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:18 vm05 ceph-mon[76604]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:18.865 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:18 vm05 ceph-mon[76604]: from='client.? 192.168.123.102:0/1754499783' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:19.018 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:18.847Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:19.018 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:18 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:18.850Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:19.302 INFO:teuthology.orchestra.run.vm02.stdout:osd 8 92s ago - 2026-03-10T07:08:19.364 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T07:08:19.366 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm02.local 2026-03-10T07:08:19.366 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- bash -c 'ceph orch upgrade ls' 2026-03-10T07:08:19.568 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:19 vm02 ceph-mon[86149]: from='client.? 192.168.123.102:0/3182889254' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:19.569 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:19 vm02 ceph-mon[87980]: from='client.? 192.168.123.102:0/3182889254' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:20.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:19 vm05 ceph-mon[76604]: from='client.? 
192.168.123.102:0/3182889254' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-10T07:08:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:20 vm02 ceph-mon[87980]: from='client.34569 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:20.835 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:20 vm02 ceph-mon[87980]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:20 vm02 ceph-mon[86149]: from='client.34569 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:20.835 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:20 vm02 ceph-mon[86149]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:20 vm05 ceph-mon[76604]: from='client.34569 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:21.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:20 vm05 ceph-mon[76604]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout:{ 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "image": "quay.io/ceph/ceph", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "registry": "quay.io", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "bare_image": "ceph/ceph", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "versions": [ 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "20.2.0", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "20.1.1", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "20.1.0", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "19.2.3", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "19.2.2", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "19.2.1", 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: "19.2.0" 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout: ] 2026-03-10T07:08:21.420 INFO:teuthology.orchestra.run.vm02.stdout:} 2026-03-10T07:08:21.470 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0' 2026-03-10T07:08:21.674 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:21 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: ::ffff:192.168.123.105 - - [10/Mar/2026:07:08:21] "GET /metrics HTTP/1.1" 200 38334 "" "Prometheus/2.51.0" 2026-03-10T07:08:21.675 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:21 vm02 ceph-mon[86149]: from='client.34575 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:21.675 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:21 vm02 ceph-mon[87980]: from='client.34575 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:22.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 
07:08:21 vm05 ceph-mon[76604]: from='client.34575 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:23.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:22 vm05 ceph-mon[76604]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:23.084 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:22 vm02 ceph-mon[87980]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:23.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:22 vm02 ceph-mon[86149]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-10T07:08:23.393 INFO:teuthology.orchestra.run.vm02.stdout: "16.2.0", 2026-03-10T07:08:23.437 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2' 2026-03-10T07:08:23.646 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:23 vm02 ceph-mon[87980]: from='client.44485 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:23.646 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:23 vm02 ceph-mon[86149]: from='client.44485 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:24.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:23 vm05 ceph-mon[76604]: from='client.44485 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:25.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:24 vm05 ceph-mon[76604]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:25.085 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:24 vm02 ceph-mon[87980]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:25.085 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:24 vm02 ceph-mon[86149]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-10T07:08:25.483 INFO:teuthology.orchestra.run.vm02.stdout: "v16.2.2", 2026-03-10T07:08:25.483 INFO:teuthology.orchestra.run.vm02.stdout: "v16.2.2-20210505", 2026-03-10T07:08:25.529 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-10T07:08:25.531 INFO:tasks.cephadm:Teardown begin 2026-03-10T07:08:25.532 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T07:08:25.560 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T07:08:25.591 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-10T07:08:25.591 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc -- ceph mgr module disable 
cephadm 2026-03-10T07:08:25.764 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:25 vm02 ceph-mon[87980]: from='client.34587 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:25.764 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:25 vm02 ceph-mon[87980]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:08:25.765 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:25 vm02 ceph-mon[86149]: from='client.34587 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:25.765 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:25 vm02 ceph-mon[86149]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:08:25.858 INFO:teuthology.orchestra.run.vm02.stderr:Error: statfs /etc/ceph/ceph.conf: no such file or directory 2026-03-10T07:08:25.877 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-10T07:08:25.877 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-10T07:08:25.877 DEBUG:teuthology.orchestra.run.vm02:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T07:08:25.896 DEBUG:teuthology.orchestra.run.vm05:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-10T07:08:25.914 INFO:tasks.cephadm:Stopping all daemons... 2026-03-10T07:08:25.914 INFO:tasks.cephadm.mon.a:Stopping mon.a... 2026-03-10T07:08:25.914 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a 2026-03-10T07:08:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:25 vm05 ceph-mon[76604]: from='client.34587 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T07:08:26.003 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:25 vm05 ceph-mon[76604]: from='mgr.24905 192.168.123.102:0/600593683' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T07:08:26.050 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:25 vm02 systemd[1]: Stopping Ceph mon.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:26.320 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:08:26] ENGINE Bus STOPPING 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:08:26] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:08:26] ENGINE Bus STOPPED 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:08:26] ENGINE Bus STARTING 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:08:26] ENGINE Serving on http://:::9283 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y[82612]: [10/Mar/2026:07:08:26] ENGINE Bus STARTED 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a[86145]: 2026-03-10T07:08:26.049+0000 7fa60bb36640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:26.320 INFO:journalctl@ceph.mon.a.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-a[86145]: 2026-03-10T07:08:26.049+0000 7fa60bb36640 -1 mon.a@0(leader) e4 *** Got Signal Terminated *** 2026-03-10T07:08:26.408 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.a.service' 2026-03-10T07:08:26.444 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:26.444 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-10T07:08:26.444 INFO:tasks.cephadm.mon.b:Stopping mon.c... 2026-03-10T07:08:26.444 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.c 2026-03-10T07:08:26.584 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:26 vm02 systemd[1]: Stopping Ceph mon.c for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:26.863 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c[87976]: 2026-03-10T07:08:26.594+0000 7f3e946fd640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:26.863 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c[87976]: 2026-03-10T07:08:26.594+0000 7f3e946fd640 -1 mon.c@1(peon) e4 *** Got Signal Terminated *** 2026-03-10T07:08:26.863 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:26 vm02 podman[106568]: 2026-03-10 07:08:26.77958073 +0000 UTC m=+0.198592150 container died 959c5054b3d979eeab9c66308ac0737005f2f2cf5cdfefc67da8e1d45942e1ea (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:08:26.863 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:26 vm02 podman[106568]: 2026-03-10 07:08:26.803861577 +0000 UTC m=+0.222872997 container remove 959c5054b3d979eeab9c66308ac0737005f2f2cf5cdfefc67da8e1d45942e1ea (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:08:26.863 INFO:journalctl@ceph.mon.c.vm02.stdout:Mar 10 07:08:26 vm02 bash[106568]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-c 2026-03-10T07:08:26.871 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.c.service' 2026-03-10T07:08:26.906 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:26.906 INFO:tasks.cephadm.mon.b:Stopped mon.c 2026-03-10T07:08:26.906 INFO:tasks.cephadm.mon.b:Stopping mon.b... 
2026-03-10T07:08:26.906 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.b 2026-03-10T07:08:27.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:26 vm05 systemd[1]: Stopping Ceph mon.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:08:27.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b[76600]: 2026-03-10T07:08:27.028+0000 7f9dc57fd640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:27.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:27 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b[76600]: 2026-03-10T07:08:27.028+0000 7f9dc57fd640 -1 mon.b@2(peon) e4 *** Got Signal Terminated *** 2026-03-10T07:08:27.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:27 vm05 podman[91245]: 2026-03-10 07:08:27.120179113 +0000 UTC m=+0.113793777 container died e901cead026d8f9cf45cbc65aa6e3098096c69ecb92fe6da5c6e5904b52f52a6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:08:27.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:27 vm05 podman[91245]: 2026-03-10 07:08:27.140768941 +0000 UTC m=+0.134383605 container remove e901cead026d8f9cf45cbc65aa6e3098096c69ecb92fe6da5c6e5904b52f52a6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-10T07:08:27.208 INFO:journalctl@ceph.mon.b.vm05.stdout:Mar 10 07:08:27 vm05 bash[91245]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mon-b 2026-03-10T07:08:27.217 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mon.b.service' 2026-03-10T07:08:27.258 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:26.952Z caller=dispatch.go:352 
level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:27.258 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:26 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:26.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:27.260 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:27.260 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-10T07:08:27.260 INFO:tasks.cephadm.mgr.y:Stopping mgr.y... 2026-03-10T07:08:27.260 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y 2026-03-10T07:08:27.520 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:27 vm02 systemd[1]: Stopping Ceph mgr.y for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:08:27.520 INFO:journalctl@ceph.mgr.y.vm02.stdout:Mar 10 07:08:27 vm02 podman[106671]: 2026-03-10 07:08:27.424288411 +0000 UTC m=+0.070734943 container died e1dc22afcf314ee0c1b06506296457fec689e5563f24878e13ed47e58d48c885 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-y, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:08:27.590 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.y.service' 2026-03-10T07:08:27.629 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:27.630 INFO:tasks.cephadm.mgr.y:Stopped mgr.y 2026-03-10T07:08:27.630 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-10T07:08:27.630 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x 2026-03-10T07:08:27.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:08:27 vm05 systemd[1]: Stopping Ceph mgr.x for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:27.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:08:27 vm05 podman[91347]: 2026-03-10 07:08:27.798877236 +0000 UTC m=+0.065855893 container died cdd4c8db2d430ac47226294b8bab09acdd7eb30d260abaca17924bac5fedd82f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:08:27.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:08:27 vm05 podman[91347]: 2026-03-10 07:08:27.823466368 +0000 UTC m=+0.090445025 container remove cdd4c8db2d430ac47226294b8bab09acdd7eb30d260abaca17924bac5fedd82f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:08:27.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:08:27 vm05 bash[91347]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-mgr-x 2026-03-10T07:08:27.889 INFO:journalctl@ceph.mgr.x.vm05.stdout:Mar 10 07:08:27 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:08:27.900 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@mgr.x.service' 2026-03-10T07:08:27.933 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:27.933 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-10T07:08:27.933 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-10T07:08:27.933 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.0 2026-03-10T07:08:28.335 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:27 vm02 systemd[1]: Stopping Ceph osd.0 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:28.335 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:08:28.044+0000 7f52d7d01640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:28.335 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:08:28.044+0000 7f52d7d01640 -1 osd.0 132 *** Got signal Terminated *** 2026-03-10T07:08:28.335 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0[90493]: 2026-03-10T07:08:28.044+0000 7f52d7d01640 -1 osd.0 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:08:29.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:28.850Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:29.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:28 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:28.851Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:33.334 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:33 vm02 podman[106776]: 2026-03-10 07:08:33.080653882 +0000 UTC m=+5.050706228 container died bf86ac25f7fbda38a41e101b3916217cdbf20636caaabd79bd5587d55d44ce66 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:08:33.334 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:33 vm02 podman[106776]: 2026-03-10 07:08:33.106186631 +0000 UTC m=+5.076238977 container remove bf86ac25f7fbda38a41e101b3916217cdbf20636caaabd79bd5587d55d44ce66 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, 
org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:08:33.334 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:33 vm02 bash[106776]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0 2026-03-10T07:08:33.334 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:33 vm02 podman[106843]: 2026-03-10 07:08:33.285163205 +0000 UTC m=+0.023544108 container create cd3765823eff0f6bafceafbc0c2921b5da247c08987883a6ec561c0c44e07ca3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:08:33.334 INFO:journalctl@ceph.osd.0.vm02.stdout:Mar 10 07:08:33 vm02 podman[106843]: 2026-03-10 07:08:33.328264377 +0000 UTC m=+0.066645279 container init cd3765823eff0f6bafceafbc0c2921b5da247c08987883a6ec561c0c44e07ca3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS) 2026-03-10T07:08:33.513 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.0.service' 2026-03-10T07:08:33.548 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:33.548 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-10T07:08:33.548 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-10T07:08:33.548 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.1 2026-03-10T07:08:34.085 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:33 vm02 systemd[1]: Stopping Ceph osd.1 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:34.085 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:08:33.708+0000 7f349d7ba640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:34.085 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:08:33.708+0000 7f349d7ba640 -1 osd.1 132 *** Got signal Terminated *** 2026-03-10T07:08:34.085 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:33 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1[92568]: 2026-03-10T07:08:33.708+0000 7f349d7ba640 -1 osd.1 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:08:37.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:36 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:36.952Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:37.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:36 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:36.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:38.995 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:38 vm02 podman[106941]: 2026-03-10 07:08:38.739123581 +0000 UTC m=+5.044281973 container died 6a939dbe6adcad61430e0ca7862a1f559c5e5089c646ad8ee12a83ef98643fe5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default) 2026-03-10T07:08:38.995 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:38 vm02 podman[106941]: 2026-03-10 07:08:38.768808693 +0000 UTC m=+5.073967075 container remove 6a939dbe6adcad61430e0ca7862a1f559c5e5089c646ad8ee12a83ef98643fe5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:08:38.995 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:38 vm02 bash[106941]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1 2026-03-10T07:08:38.995 INFO:journalctl@ceph.osd.1.vm02.stdout:Mar 10 07:08:38 vm02 podman[107009]: 2026-03-10 07:08:38.915001567 +0000 UTC m=+0.020474363 container create f7ce7b4acf3edab84bba6de87b4fc1e5a54b32a23ebb34c08ccc5134928cc6df (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-1-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-10T07:08:38.995 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:38 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:38.851Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:38.995 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:38 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:38.852Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:08:39.164 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.1.service' 2026-03-10T07:08:39.198 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:39.198 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-10T07:08:39.199 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-10T07:08:39.199 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.2 2026-03-10T07:08:39.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:39 vm02 systemd[1]: Stopping Ceph osd.2 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:39.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:39 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:08:39.356+0000 7fb8f313d640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:39.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:39 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:08:39.356+0000 7fb8f313d640 -1 osd.2 132 *** Got signal Terminated *** 2026-03-10T07:08:39.585 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:39 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2[94506]: 2026-03-10T07:08:39.356+0000 7fb8f313d640 -1 osd.2 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:08:44.643 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:44 vm02 podman[107106]: 2026-03-10 07:08:44.38593969 +0000 UTC m=+5.043121121 container died b0f12ddcd6b3b42d718532df48aa3ce40bb8ee654c51d57d5a71076128c55e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:08:44.643 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:44 vm02 podman[107106]: 2026-03-10 07:08:44.485045953 +0000 UTC m=+5.142227384 container remove b0f12ddcd6b3b42d718532df48aa3ce40bb8ee654c51d57d5a71076128c55e06 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default) 2026-03-10T07:08:44.643 INFO:journalctl@ceph.osd.2.vm02.stdout:Mar 10 07:08:44 vm02 bash[107106]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-2 2026-03-10T07:08:44.862 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.2.service' 2026-03-10T07:08:44.901 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:44.902 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-10T07:08:44.902 INFO:tasks.cephadm.osd.3:Stopping osd.3... 
2026-03-10T07:08:44.902 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3 2026-03-10T07:08:45.335 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:44 vm02 systemd[1]: Stopping Ceph osd.3 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:08:45.335 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:08:45.048+0000 7f7640d81640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:45.335 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:08:45.048+0000 7f7640d81640 -1 osd.3 132 *** Got signal Terminated *** 2026-03-10T07:08:45.335 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:45 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3[96609]: 2026-03-10T07:08:45.048+0000 7f7640d81640 -1 osd.3 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:08:47.286 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:46 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:46.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:47.286 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:46 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:46.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:49.334 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:48 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:48.852Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:08:49.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:48 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:48.853Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107268]: 2026-03-10 07:08:50.091098522 +0000 UTC m=+5.055596577 container died 396c050a0b4f77668a63b8750966d7d823a865daa29f8619379186d29f56b0a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107268]: 2026-03-10 07:08:50.1112447 +0000 UTC m=+5.075742755 container remove 396c050a0b4f77668a63b8750966d7d823a865daa29f8619379186d29f56b0a3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default) 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 bash[107268]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107346]: 2026-03-10 07:08:50.253105626 +0000 UTC m=+0.019690786 container create 19c1150a519baecc8fd04a60e9a1d78c54195a8077680d1cc67b9ce81fa2a207 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107346]: 2026-03-10 07:08:50.298999625 +0000 UTC m=+0.065584785 container init 19c1150a519baecc8fd04a60e9a1d78c54195a8077680d1cc67b9ce81fa2a207 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107346]: 2026-03-10 07:08:50.303339918 +0000 UTC m=+0.069925078 container start 19c1150a519baecc8fd04a60e9a1d78c54195a8077680d1cc67b9ce81fa2a207 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107346]: 2026-03-10 07:08:50.310192154 +0000 UTC m=+0.076777314 container attach 19c1150a519baecc8fd04a60e9a1d78c54195a8077680d1cc67b9ce81fa2a207 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-3-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:08:50.345 INFO:journalctl@ceph.osd.3.vm02.stdout:Mar 10 07:08:50 vm02 podman[107346]: 2026-03-10 07:08:50.245774805 +0000 UTC m=+0.012359975 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:08:50.475 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.3.service' 2026-03-10T07:08:50.517 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:50.517 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-10T07:08:50.517 INFO:tasks.cephadm.osd.4:Stopping osd.4... 2026-03-10T07:08:50.517 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.4 2026-03-10T07:08:50.896 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:50 vm05 systemd[1]: Stopping Ceph osd.4 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:08:50.896 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:50 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:08:50.621+0000 7f9b30306640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:50.896 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:50 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:08:50.621+0000 7f9b30306640 -1 osd.4 132 *** Got signal Terminated *** 2026-03-10T07:08:50.896 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:50 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:08:50.621+0000 7f9b30306640 -1 osd.4 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:08:54.043 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:53 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:08:53.768+0000 7f9b2c91f640 -1 osd.4 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:28.115614+0000 front 2026-03-10T07:08:28.115735+0000 (oldest deadline 2026-03-10T07:08:53.415118+0000) 2026-03-10T07:08:54.503 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:54.042+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:08:55.003 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:54 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4[79014]: 2026-03-10T07:08:54.725+0000 7f9b2c91f640 -1 osd.4 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:28.115614+0000 front 2026-03-10T07:08:28.115735+0000 (oldest deadline 2026-03-10T07:08:53.415118+0000) 2026-03-10T07:08:55.503 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:55 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:55.014+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:08:55.503 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:08:55 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:08:55.067+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91449]: 2026-03-10 07:08:55.644920697 +0000 UTC m=+5.038061238 container died 05476fb1f9d8f112d690b6cd2e8b58cc057cd83c3131a134d5a9cf06b2c139d6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, 
org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91449]: 2026-03-10 07:08:55.671520483 +0000 UTC m=+5.064661013 container remove 05476fb1f9d8f112d690b6cd2e8b58cc057cd83c3131a134d5a9cf06b2c139d6 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 bash[91449]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91526]: 2026-03-10 07:08:55.799665588 +0000 UTC m=+0.016837996 container create bc127fcdad05a570395b583506909abc0e4bc055c48cb1f5ad02fdfddba4366d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91526]: 2026-03-10 07:08:55.842112816 +0000 UTC m=+0.059285224 container init bc127fcdad05a570395b583506909abc0e4bc055c48cb1f5ad02fdfddba4366d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_REF=squid) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91526]: 2026-03-10 07:08:55.845864497 +0000 UTC m=+0.063036905 
container start bc127fcdad05a570395b583506909abc0e4bc055c48cb1f5ad02fdfddba4366d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91526]: 2026-03-10 07:08:55.846675235 +0000 UTC m=+0.063847643 container attach bc127fcdad05a570395b583506909abc0e4bc055c48cb1f5ad02fdfddba4366d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91526]: 2026-03-10 07:08:55.792131868 +0000 UTC m=+0.009304276 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:08:55.986 INFO:journalctl@ceph.osd.4.vm05.stdout:Mar 10 07:08:55 vm05 podman[91526]: 2026-03-10 07:08:55.985953689 +0000 UTC m=+0.203126097 container died bc127fcdad05a570395b583506909abc0e4bc055c48cb1f5ad02fdfddba4366d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-10T07:08:56.013 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.4.service' 2026-03-10T07:08:56.058 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:08:56.058 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-10T07:08:56.058 INFO:tasks.cephadm.osd.5:Stopping osd.5... 
2026-03-10T07:08:56.058 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.5 2026-03-10T07:08:56.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:56 vm05 systemd[1]: Stopping Ceph osd.5 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:08:56.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:56.215+0000 7fed2fda1640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:08:56.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:56.215+0000 7fed2fda1640 -1 osd.5 132 *** Got signal Terminated *** 2026-03-10T07:08:56.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:56.215+0000 7fed2fda1640 -1 osd.5 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:08:56.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:56.018+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:08:56.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:08:56.048+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:08:57.134 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:56.808+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.029122+0000 front 2026-03-10T07:08:32.029156+0000 (oldest deadline 2026-03-10T07:08:56.128904+0000) 2026-03-10T07:08:57.134 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:56 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:56.992+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:08:57.134 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:08:57 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:08:57.047+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:08:57.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:56 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:56.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup 
host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:57.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:56 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:56.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:08:58.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:57 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:57.759+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.029122+0000 front 2026-03-10T07:08:32.029156+0000 (oldest deadline 2026-03-10T07:08:56.128904+0000) 2026-03-10T07:08:58.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:57 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:57.984+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:08:58.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:08:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:08:58.007+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:08:59.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:58.788+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.029122+0000 front 2026-03-10T07:08:32.029156+0000 (oldest deadline 2026-03-10T07:08:56.128904+0000) 2026-03-10T07:08:59.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:58.975+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:08:59.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:08:58 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:08:58.971+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:08:59.334 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:58.853Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:08:59.334 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:08:58 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:08:58.854Z caller=notify.go:732 level=warn 
component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:09:00.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:59.760+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.029122+0000 front 2026-03-10T07:08:32.029156+0000 (oldest deadline 2026-03-10T07:08:56.128904+0000) 2026-03-10T07:09:00.253 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:08:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:08:59.760+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:36.129639+0000 front 2026-03-10T07:08:36.129342+0000 (oldest deadline 2026-03-10T07:08:59.629130+0000) 2026-03-10T07:09:00.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:08:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:08:59.958+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:00.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:08:59 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:08:59.927+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:01.003 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:09:00.717+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.029122+0000 front 2026-03-10T07:08:32.029156+0000 (oldest deadline 2026-03-10T07:08:56.128904+0000) 2026-03-10T07:09:01.003 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5[80920]: 2026-03-10T07:09:00.717+0000 7fed2bbb9640 -1 osd.5 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:36.129639+0000 front 2026-03-10T07:08:36.129342+0000 (oldest deadline 2026-03-10T07:08:59.629130+0000) 2026-03-10T07:09:01.003 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:00.956+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:01.003 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:00 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:00.924+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:01.492 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:01 vm05 podman[91622]: 2026-03-10 07:09:01.241944699 +0000 UTC m=+5.042556571 container died 37ab69a8d103d624f93ea4d257d70726835cafb62026e492b9a03356ed073590 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, ceph=True, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0) 2026-03-10T07:09:01.492 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:01 vm05 podman[91622]: 2026-03-10 07:09:01.274261665 +0000 UTC m=+5.074873537 container remove 37ab69a8d103d624f93ea4d257d70726835cafb62026e492b9a03356ed073590 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default) 2026-03-10T07:09:01.492 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:01 vm05 bash[91622]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5 2026-03-10T07:09:01.492 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:01 vm05 podman[91692]: 2026-03-10 07:09:01.437297455 +0000 UTC m=+0.018252996 container create c1a0a9fd9ad51a8239d3de367b27bafa2000b503c7cff4590d637941a3667adc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-10T07:09:01.492 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:01 vm05 podman[91692]: 2026-03-10 07:09:01.482201601 +0000 UTC m=+0.063157151 container init c1a0a9fd9ad51a8239d3de367b27bafa2000b503c7cff4590d637941a3667adc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:09:01.492 INFO:journalctl@ceph.osd.5.vm05.stdout:Mar 10 07:09:01 vm05 podman[91692]: 2026-03-10 07:09:01.486245179 +0000 UTC m=+0.067200719 container start c1a0a9fd9ad51a8239d3de367b27bafa2000b503c7cff4590d637941a3667adc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-5-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:09:01.674 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.5.service' 2026-03-10T07:09:01.713 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:09:01.713 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-10T07:09:01.713 INFO:tasks.cephadm.osd.6:Stopping osd.6... 2026-03-10T07:09:01.713 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6 2026-03-10T07:09:02.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:01 vm05 systemd[1]: Stopping Ceph osd.6 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
2026-03-10T07:09:02.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:01.873+0000 7feda753f640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:09:02.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:01.873+0000 7feda753f640 -1 osd.6 132 *** Got signal Terminated *** 2026-03-10T07:09:02.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:01.873+0000 7feda753f640 -1 osd.6 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:09:02.254 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:02.001+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:02.254 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:01 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:01.941+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:03.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:02.962+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:03.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:02 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:02.935+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:04.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:03.977+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:04.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:03.977+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:37.988197+0000 front 2026-03-10T07:08:37.988294+0000 (oldest deadline 2026-03-10T07:09:03.287893+0000) 2026-03-10T07:09:04.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:03.919+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 
2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:04.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:03 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:03.919+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:05.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:04.942+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:05.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:04.942+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:37.988197+0000 front 2026-03-10T07:08:37.988294+0000 (oldest deadline 2026-03-10T07:09:03.287893+0000) 2026-03-10T07:09:05.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:04.931+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:05.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:04 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:04.931+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:06.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:05 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:05.945+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:29.187947+0000 front 2026-03-10T07:08:29.187940+0000 (oldest deadline 2026-03-10T07:08:53.887344+0000) 2026-03-10T07:09:06.253 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:05 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6[82857]: 2026-03-10T07:09:05.945+0000 7feda3b58640 -1 osd.6 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:37.988197+0000 front 2026-03-10T07:08:37.988294+0000 (oldest deadline 2026-03-10T07:09:03.287893+0000) 2026-03-10T07:09:06.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:05 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:05.888+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:06.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:05 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:05.888+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:07.171 
INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:06 vm05 podman[91791]: 2026-03-10 07:09:06.915347927 +0000 UTC m=+5.058541080 container died affae9a808f3e5e6d40a6c29be51d9f6a3469778290d112eab458cd556d63f0c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:06 vm05 podman[91791]: 2026-03-10 07:09:06.94687777 +0000 UTC m=+5.090070923 container remove affae9a808f3e5e6d40a6c29be51d9f6a3469778290d112eab458cd556d63f0c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:06 vm05 bash[91791]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:07 vm05 podman[91856]: 2026-03-10 07:09:07.095788152 +0000 UTC m=+0.017533729 container create 8bb72dc36767f6703defa866310e6c201899e625064f97c4e01416ed199b878d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:07 vm05 podman[91856]: 2026-03-10 07:09:07.132279352 +0000 UTC m=+0.054024929 container init 8bb72dc36767f6703defa866310e6c201899e625064f97c4e01416ed199b878d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, org.opencontainers.image.authors=Ceph Release Team , 
org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:07 vm05 podman[91856]: 2026-03-10 07:09:07.139187131 +0000 UTC m=+0.060932708 container start 8bb72dc36767f6703defa866310e6c201899e625064f97c4e01416ed199b878d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.6.vm05.stdout:Mar 10 07:09:07 vm05 podman[91856]: 2026-03-10 07:09:07.140390233 +0000 UTC m=+0.062135810 container attach 8bb72dc36767f6703defa866310e6c201899e625064f97c4e01416ed199b878d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-6-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:06.893+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:07.171 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:06 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:06.893+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:07.324 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.6.service' 2026-03-10T07:09:07.335 
INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:09:06.954Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:09:07.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:06 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:09:06.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-10T07:09:07.360 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:09:07.360 INFO:tasks.cephadm.osd.6:Stopped osd.6 2026-03-10T07:09:07.360 INFO:tasks.cephadm.osd.7:Stopping osd.7... 2026-03-10T07:09:07.360 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7 2026-03-10T07:09:07.439 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 systemd[1]: Stopping Ceph osd.7 for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:09:07.753 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:07.509+0000 7f6427ae4640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-10T07:09:07.753 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:07.509+0000 7f6427ae4640 -1 osd.7 132 *** Got signal Terminated *** 2026-03-10T07:09:07.753 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:07.509+0000 7f6427ae4640 -1 osd.7 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-10T07:09:08.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:07.894+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:08.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:07.894+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:08.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:07 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:07.894+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6822 osd.2 since back 2026-03-10T07:08:43.529375+0000 front 2026-03-10T07:08:43.529468+0000 (oldest deadline 2026-03-10T07:09:07.028895+0000) 2026-03-10T07:09:09.253 
INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:08.872+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:09.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:08.872+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:09.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:08 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:08.872+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6822 osd.2 since back 2026-03-10T07:08:43.529375+0000 front 2026-03-10T07:08:43.529468+0000 (oldest deadline 2026-03-10T07:09:07.028895+0000) 2026-03-10T07:09:09.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:09:08.854Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:09:09.335 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:08 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:09:08.855Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm02.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.102:8443: connect: connection refused" 2026-03-10T07:09:10.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:09.915+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:10.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:09.915+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:10.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:09 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:09.915+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6822 osd.2 since back 2026-03-10T07:08:43.529375+0000 front 2026-03-10T07:08:43.529468+0000 (oldest deadline 2026-03-10T07:09:07.028895+0000) 2026-03-10T07:09:11.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:10.953+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 
2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:11.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:10.953+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:11.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:10 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:10.953+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6822 osd.2 since back 2026-03-10T07:08:43.529375+0000 front 2026-03-10T07:08:43.529468+0000 (oldest deadline 2026-03-10T07:09:07.028895+0000) 2026-03-10T07:09:12.253 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:11.975+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6806 osd.0 since back 2026-03-10T07:08:32.428471+0000 front 2026-03-10T07:08:32.428435+0000 (oldest deadline 2026-03-10T07:08:54.128168+0000) 2026-03-10T07:09:12.367 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:11.975+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6814 osd.1 since back 2026-03-10T07:08:38.228975+0000 front 2026-03-10T07:08:38.228933+0000 (oldest deadline 2026-03-10T07:09:03.528640+0000) 2026-03-10T07:09:12.367 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:11 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7[84778]: 2026-03-10T07:09:11.975+0000 7f64238fc640 -1 osd.7 132 heartbeat_check: no reply from 192.168.123.102:6822 osd.2 since back 2026-03-10T07:08:43.529375+0000 front 2026-03-10T07:08:43.529468+0000 (oldest deadline 2026-03-10T07:09:07.028895+0000) 2026-03-10T07:09:12.956 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:12 vm05 podman[91951]: 2026-03-10 07:09:12.537793199 +0000 UTC m=+5.042804616 container died d45f9e33923c28e203e214aae9478ec9919f30631ad5361f1695b61c7e802871 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS) 2026-03-10T07:09:13.218 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:12 vm05 podman[91951]: 2026-03-10 07:09:12.956298063 +0000 UTC m=+5.461309480 container remove d45f9e33923c28e203e214aae9478ec9919f30631ad5361f1695b61c7e802871 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-10T07:09:13.218 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:12 vm05 bash[91951]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7 2026-03-10T07:09:13.218 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92017]: 2026-03-10 07:09:13.127118232 +0000 UTC m=+0.021281823 container create f3b91416c728e501e79dc3f39e1d8eb1bd3bc201314c2ba5561a033793bc93dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-10T07:09:13.218 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92017]: 2026-03-10 07:09:13.174383488 +0000 UTC m=+0.068547079 container init f3b91416c728e501e79dc3f39e1d8eb1bd3bc201314c2ba5561a033793bc93dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:09:13.218 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92017]: 2026-03-10 07:09:13.178876005 +0000 UTC m=+0.073039596 container start f3b91416c728e501e79dc3f39e1d8eb1bd3bc201314c2ba5561a033793bc93dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-10T07:09:13.218 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92017]: 2026-03-10 07:09:13.180602588 +0000 UTC m=+0.074766179 container attach f3b91416c728e501e79dc3f39e1d8eb1bd3bc201314c2ba5561a033793bc93dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0) 2026-03-10T07:09:13.396 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service' 2026-03-10T07:09:13.471 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92017]: 2026-03-10 07:09:13.119500705 +0000 UTC m=+0.013664305 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-10T07:09:13.471 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92036]: 2026-03-10 07:09:13.357175674 +0000 UTC m=+0.013020542 container died f3b91416c728e501e79dc3f39e1d8eb1bd3bc201314c2ba5561a033793bc93dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-10T07:09:13.471 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 podman[92036]: 2026-03-10 07:09:13.379616726 +0000 UTC m=+0.035461594 container remove f3b91416c728e501e79dc3f39e1d8eb1bd3bc201314c2ba5561a033793bc93dd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-osd-7-deactivate, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, 
org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-10T07:09:13.471 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service: Deactivated successfully. 2026-03-10T07:09:13.471 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 systemd[1]: Stopped Ceph osd.7 for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:09:13.471 INFO:journalctl@ceph.osd.7.vm05.stdout:Mar 10 07:09:13 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@osd.7.service: Consumed 2.283s CPU time. 2026-03-10T07:09:13.483 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:09:13.483 INFO:tasks.cephadm.osd.7:Stopped osd.7 2026-03-10T07:09:13.483 INFO:tasks.cephadm.prometheus.a:Stopping prometheus.a... 2026-03-10T07:09:13.483 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 systemd[1]: Stopping Ceph prometheus.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 
2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.640Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.641Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.643Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.643Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a[73612]: ts=2026-03-10T07:09:13.643Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 podman[92114]: 2026-03-10 07:09:13.654238779 +0000 UTC m=+0.030340499 container died 2f14ff887bc7d1d6dd40d0cdac3a272c9a3d688871404114c080f6cfaaeb2799 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 podman[92114]: 2026-03-10 07:09:13.675710446 +0000 UTC m=+0.051812166 container remove 2f14ff887bc7d1d6dd40d0cdac3a272c9a3d688871404114c080f6cfaaeb2799 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-10T07:09:13.743 INFO:journalctl@ceph.prometheus.a.vm05.stdout:Mar 10 07:09:13 vm05 bash[92114]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-prometheus-a 2026-03-10T07:09:13.755 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@prometheus.a.service' 2026-03-10T07:09:13.790 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-10T07:09:13.790 INFO:tasks.cephadm.prometheus.a:Stopped prometheus.a 2026-03-10T07:09:13.790 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc --force --keep-logs 2026-03-10T07:09:15.207 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: Stopping Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 
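Each of the daemon stops above follows the same pattern: the cephadm task stops the daemon's per-fsid systemd unit (which tears down the podman-wrapped container), then kills the journalctl follower teuthology had attached to that unit for log capture. A minimal shell sketch of that pattern, using the fsid and one daemon name from this run:

    fsid=28bd35e6-1c4e-11f1-9057-21b3549603fc
    name=prometheus.a
    # stop the containerized daemon via its systemd unit
    sudo systemctl stop "ceph-${fsid}@${name}"
    # terminate the journalctl follower that was streaming this unit's output
    sudo pkill -f "journalctl -f -n 0 -u ceph-${fsid}@${name}.service"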
2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 podman[107748]: 2026-03-10 07:09:15.284478496 +0000 UTC m=+0.020832083 container died 0bc0b34c732af84a19cf90445e30e5b59051965f4df530eadc55f6ab2d4ff271 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 podman[107748]: 2026-03-10 07:09:15.312853404 +0000 UTC m=+0.049206991 container remove 0bc0b34c732af84a19cf90445e30e5b59051965f4df530eadc55f6ab2d4ff271 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 bash[107748]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-a 2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: Stopped Ceph node-exporter.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:09:15.473 INFO:journalctl@ceph.node-exporter.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.a.service: Consumed 1.179s CPU time. 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: Stopping Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a[81637]: ts=2026-03-10T07:09:15.659Z caller=main.go:583 level=info msg="Received SIGTERM, exiting gracefully..." 
2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 podman[107844]: 2026-03-10 07:09:15.671395946 +0000 UTC m=+0.029775402 container died 520cbcc5ad9835ee8cbaf7318bda577029ffd2a5694a8cff9072c845b97e28f0 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 podman[107844]: 2026-03-10 07:09:15.687898624 +0000 UTC m=+0.046278078 container remove 520cbcc5ad9835ee8cbaf7318bda577029ffd2a5694a8cff9072c845b97e28f0 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 podman[107844]: 2026-03-10 07:09:15.689043637 +0000 UTC m=+0.047423102 volume remove dc325bb8f832a7ea520371bd784b9c962a26d99997a0dbd3c5e6e503d0e1ef8c 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 bash[107844]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-alertmanager-a 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@alertmanager.a.service: Deactivated successfully. 2026-03-10T07:09:15.835 INFO:journalctl@ceph.alertmanager.a.vm02.stdout:Mar 10 07:09:15 vm02 systemd[1]: Stopped Ceph alertmanager.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:09:47.435 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc --force --keep-logs 2026-03-10T07:09:48.971 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 systemd[1]: Stopping Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:09:48.972 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 podman[92496]: 2026-03-10 07:09:48.892549859 +0000 UTC m=+0.027077702 container died 0129ed456f9d5feae6aeb1883606933102402884ed39fbb418ae0bf34c0ffc52 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:09:48.972 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 podman[92496]: 2026-03-10 07:09:48.912706525 +0000 UTC m=+0.047234378 container remove 0129ed456f9d5feae6aeb1883606933102402884ed39fbb418ae0bf34c0ffc52 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-10T07:09:48.972 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 bash[92496]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-node-exporter-b 2026-03-10T07:09:48.972 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-10T07:09:49.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-10T07:09:49.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 systemd[1]: Stopped Ceph node-exporter.b for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 
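The node-exporter stops on both hosts end with "status=143" and "Failed with result 'exit-code'". That is the expected outcome of a SIGTERM-driven stop for a process that simply exits on the signal rather than trapping it: 143 encodes 128 + 15 (SIGTERM), as bash's signal table confirms:

    kill -l $((143 - 128))   # prints TERM; the status=143 above just means "killed by SIGTERM"

alertmanager, which handles SIGTERM and exits cleanly, is instead reported as "Deactivated successfully".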
2026-03-10T07:09:49.253 INFO:journalctl@ceph.node-exporter.b.vm05.stdout:Mar 10 07:09:48 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@node-exporter.b.service: Consumed 1.109s CPU time. 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 systemd[1]: Stopping Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc... 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a[71709]: t=2026-03-10T07:09:49+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 podman[92615]: 2026-03-10 07:09:49.351761011 +0000 UTC m=+0.028178102 container died 34567dcb4b514d0f8c2217a5f2563d5e13ea893b75b75582d24a34a0eebc29b9 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, version=8.5, io.k8s.display-name=Red Hat Universal Base Image 8, build-date=2022-03-28T10:36:18.413762, name=ubi8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.buildah.version=1.24.2, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, com.redhat.component=ubi8-container, release=236.1648460182, architecture=x86_64, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=Ceph Grafana Container, io.openshift.tags=base rhel8, distribution-scope=public, summary=Grafana Container configured for Ceph mgr/dashboard integration, vcs-type=git) 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 podman[92615]: 2026-03-10 07:09:49.37095154 +0000 UTC m=+0.047368631 container remove 34567dcb4b514d0f8c2217a5f2563d5e13ea893b75b75582d24a34a0eebc29b9 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a, description=Ceph Grafana Container, release=236.1648460182, io.buildah.version=1.24.2, io.k8s.display-name=Red Hat Universal Base Image 8, version=8.5, io.openshift.expose-services=, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, name=ubi8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.component=ubi8-container, maintainer=Paul Cuzner , vendor=Red Hat, Inc., io.openshift.tags=base rhel8, summary=Grafana Container configured for Ceph mgr/dashboard integration, build-date=2022-03-28T10:36:18.413762) 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 bash[92615]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana-a 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 bash[92634]: Error: no container with name or ID "ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc-grafana.a" found: no such container 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@grafana.a.service: Deactivated successfully. 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 systemd[1]: Stopped Ceph grafana.a for 28bd35e6-1c4e-11f1-9057-21b3549603fc. 2026-03-10T07:09:49.582 INFO:journalctl@ceph.grafana.a.vm05.stdout:Mar 10 07:09:49 vm05 systemd[1]: ceph-28bd35e6-1c4e-11f1-9057-21b3549603fc@grafana.a.service: Consumed 1.283s CPU time. 2026-03-10T07:10:10.585 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T07:10:10.611 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-10T07:10:10.644 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-10T07:10:10.644 DEBUG:teuthology.misc:Transferring archived files from vm02:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937/remote/vm02/crash 2026-03-10T07:10:10.644 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/crash -- . 2026-03-10T07:10:10.675 INFO:teuthology.orchestra.run.vm02.stderr:tar: /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/crash: Cannot open: No such file or directory 2026-03-10T07:10:10.675 INFO:teuthology.orchestra.run.vm02.stderr:tar: Error is not recoverable: exiting now 2026-03-10T07:10:10.676 DEBUG:teuthology.misc:Transferring archived files from vm05:/var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937/remote/vm05/crash 2026-03-10T07:10:10.676 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/crash -- . 2026-03-10T07:10:10.712 INFO:teuthology.orchestra.run.vm05.stderr:tar: /var/lib/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/crash: Cannot open: No such file or directory 2026-03-10T07:10:10.712 INFO:teuthology.orchestra.run.vm05.stderr:tar: Error is not recoverable: exiting now 2026-03-10T07:10:10.713 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-10T07:10:10.713 DEBUG:teuthology.orchestra.run.vm02:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | egrep -v CEPHADM_AGENT_DOWN | head -n 1 2026-03-10T07:10:10.747 INFO:tasks.cephadm:Compressing logs... 
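The "Checking cluster log for badness" step scans the cluster log for warning-or-worse entries, keeps only those matching the job's log-only-match pattern (CEPHADM_), then strips every pattern on the log-ignorelist; anything that survives head -n 1 is what the task would flag. Restated from the command above:

    sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log \
      | egrep CEPHADM_ \
      | egrep -v '\(MDS_ALL_DOWN\)' \
      | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' \
      | egrep -v CEPHADM_STRAY_DAEMON \
      | egrep -v CEPHADM_FAILED_DAEMON \
      | egrep -v CEPHADM_AGENT_DOWN \
      | head -n 1   # an empty result means no unexpected CEPHADM_* health message

The crash-dump archiving just before it is best-effort: the tar errors ("Cannot open: No such file or directory") only mean that neither host ever populated /var/lib/ceph/<fsid>/crash.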
2026-03-10T07:10:10.747 DEBUG:teuthology.orchestra.run.vm02:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T07:10:10.789 DEBUG:teuthology.orchestra.run.vm05:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T07:10:10.814 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-10T07:10:10.814 INFO:teuthology.orchestra.run.vm05.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-10T07:10:10.814 INFO:teuthology.orchestra.run.vm02.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-10T07:10:10.815 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-volume.log 2026-03-10T07:10:10.815 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-10T07:10:10.815 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.a.log 2026-03-10T07:10:10.815 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log 2026-03-10T07:10:10.817 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.b.log 2026-03-10T07:10:10.820 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.a.log: 91.1% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-10T07:10:10.820 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.audit.log 2026-03-10T07:10:10.821 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mgr.y.log 2026-03-10T07:10:10.823 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.cephadm.log 2026-03-10T07:10:10.824 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.audit.log: 92.6% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log.gz 2026-03-10T07:10:10.825 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.b.log: 93.6% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-volume.log.gz 2026-03-10T07:10:10.825 INFO:teuthology.orchestra.run.vm02.stderr: 94.3% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.audit.log.gz 2026-03-10T07:10:10.825 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.cephadm.log 2026-03-10T07:10:10.826 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.audit.log 2026-03-10T07:10:10.826 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.cephadm.log: 83.0% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.cephadm.log.gz 2026-03-10T07:10:10.826 
INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log 2026-03-10T07:10:10.828 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.audit.log: 90.9% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-10T07:10:10.829 INFO:teuthology.orchestra.run.vm05.stderr: 90.7% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.audit.log.gz 2026-03-10T07:10:10.829 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mgr.x.log 2026-03-10T07:10:10.830 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log: 86.9% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.log.gz 2026-03-10T07:10:10.831 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.4.log 2026-03-10T07:10:10.837 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mgr.y.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-volume.log 2026-03-10T07:10:10.838 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mgr.x.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.5.log 2026-03-10T07:10:10.839 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.cephadm.log: 90.5% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph.cephadm.log.gz 2026-03-10T07:10:10.843 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.6.log 2026-03-10T07:10:10.843 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.c.log 2026-03-10T07:10:10.852 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.0.log 2026-03-10T07:10:10.854 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.7.log 2026-03-10T07:10:10.859 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.c.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.1.log 2026-03-10T07:10:10.862 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.0.log: 93.7% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-volume.log.gz 2026-03-10T07:10:10.864 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.foo.vm05.bmslvs.log 2026-03-10T07:10:10.870 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.2.log 2026-03-10T07:10:10.871 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.smpl.vm05.xjafam.log 
2026-03-10T07:10:10.872 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.foo.vm05.bmslvs.log: 75.8% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.foo.vm05.bmslvs.log.gz 2026-03-10T07:10:10.880 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.3.log 2026-03-10T07:10:10.883 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.smpl.vm05.xjafam.log: 75.8% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.smpl.vm05.xjafam.log.gz 2026-03-10T07:10:10.894 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.foo.vm02.kkmsll.log 2026-03-10T07:10:10.898 INFO:teuthology.orchestra.run.vm05.stderr: 89.9% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mgr.x.log.gz 2026-03-10T07:10:10.902 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.smpl.vm02.kyvfxo.log 2026-03-10T07:10:10.906 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.foo.vm02.kkmsll.log: 76.2% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.foo.vm02.kkmsll.log.gz 2026-03-10T07:10:10.913 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/tcmu-runner.log 2026-03-10T07:10:10.914 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.smpl.vm02.kyvfxo.log: 76.2% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-client.rgw.smpl.vm02.kyvfxo.log.gz 2026-03-10T07:10:10.922 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/tcmu-runner.log: 84.3% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/tcmu-runner.log.gz 2026-03-10T07:10:11.180 INFO:teuthology.orchestra.run.vm05.stderr: 92.3% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.b.log.gz 2026-03-10T07:10:11.250 INFO:teuthology.orchestra.run.vm02.stderr: 89.2% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mgr.y.log.gz 2026-03-10T07:10:11.560 INFO:teuthology.orchestra.run.vm02.stderr: 92.2% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.c.log.gz 2026-03-10T07:10:12.009 INFO:teuthology.orchestra.run.vm05.stderr: 93.8% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.6.log.gz 2026-03-10T07:10:12.208 INFO:teuthology.orchestra.run.vm05.stderr: 93.9% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.5.log.gz 2026-03-10T07:10:12.223 INFO:teuthology.orchestra.run.vm05.stderr: 94.1% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.7.log.gz 2026-03-10T07:10:12.276 INFO:teuthology.orchestra.run.vm05.stderr: 94.1% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.4.log.gz 2026-03-10T07:10:12.278 INFO:teuthology.orchestra.run.vm05.stderr: 2026-03-10T07:10:12.278 
INFO:teuthology.orchestra.run.vm05.stderr:real 0m1.474s 2026-03-10T07:10:12.278 INFO:teuthology.orchestra.run.vm05.stderr:user 0m2.734s 2026-03-10T07:10:12.278 INFO:teuthology.orchestra.run.vm05.stderr:sys 0m0.146s 2026-03-10T07:10:12.317 INFO:teuthology.orchestra.run.vm02.stderr: 91.1% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-mon.a.log.gz 2026-03-10T07:10:12.318 INFO:teuthology.orchestra.run.vm02.stderr: 93.9% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.2.log.gz 2026-03-10T07:10:12.668 INFO:teuthology.orchestra.run.vm02.stderr: 94.0% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.0.log.gz 2026-03-10T07:10:12.669 INFO:teuthology.orchestra.run.vm02.stderr: 94.0% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.1.log.gz 2026-03-10T07:10:12.844 INFO:teuthology.orchestra.run.vm02.stderr: 94.0% -- replaced with /var/log/ceph/28bd35e6-1c4e-11f1-9057-21b3549603fc/ceph-osd.3.log.gz 2026-03-10T07:10:12.845 INFO:teuthology.orchestra.run.vm02.stderr: 2026-03-10T07:10:12.845 INFO:teuthology.orchestra.run.vm02.stderr:real 0m2.041s 2026-03-10T07:10:12.845 INFO:teuthology.orchestra.run.vm02.stderr:user 0m3.036s 2026-03-10T07:10:12.845 INFO:teuthology.orchestra.run.vm02.stderr:sys 0m0.156s 2026-03-10T07:10:12.846 INFO:tasks.cephadm:Archiving logs... 2026-03-10T07:10:12.846 DEBUG:teuthology.misc:Transferring archived files from vm02:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937/remote/vm02/log 2026-03-10T07:10:12.846 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-10T07:10:13.073 DEBUG:teuthology.misc:Transferring archived files from vm05:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937/remote/vm05/log 2026-03-10T07:10:13.073 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-10T07:10:13.243 INFO:tasks.cephadm:Removing cluster... 2026-03-10T07:10:13.243 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc --force 2026-03-10T07:10:13.449 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 28bd35e6-1c4e-11f1-9057-21b3549603fc --force 2026-03-10T07:10:13.656 INFO:tasks.cephadm:Removing cephadm ... 2026-03-10T07:10:13.656 DEBUG:teuthology.orchestra.run.vm02:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-10T07:10:13.673 DEBUG:teuthology.orchestra.run.vm05:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-10T07:10:13.689 INFO:tasks.cephadm:Teardown complete 2026-03-10T07:10:13.689 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-10T07:10:13.692 INFO:teuthology.task.clock:Checking final clock skew... 
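Log compression runs the same pipeline on both hosts: find every *.log under /var/log/ceph (and /var/log/rbd-target-api, which does not exist here and is merely reported by find), and gzip each file in its own process. With --max-args=1 and --max-procs=0, xargs spawns one gzip per file with no concurrency cap, which is why the --verbose messages above are interleaved between files. Restated with comments:

    # one gzip per file, unlimited parallel workers; a missing directory is non-fatal
    time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --

Note that the teardown runs cephadm rm-cluster twice: first with --keep-logs (before this compression and the tar-based archiving), then a final rm-cluster --force without it once /var/log/ceph has been copied into the archive directory.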
2026-03-10T07:10:13.692 DEBUG:teuthology.orchestra.run.vm02:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-10T07:10:13.716 DEBUG:teuthology.orchestra.run.vm05:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-10T07:10:13.731 INFO:teuthology.orchestra.run.vm02.stderr:bash: line 1: ntpq: command not found 2026-03-10T07:10:13.746 INFO:teuthology.orchestra.run.vm05.stderr:bash: line 1: ntpq: command not found 2026-03-10T07:10:13.826 INFO:teuthology.orchestra.run.vm02.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-10T07:10:13.826 INFO:teuthology.orchestra.run.vm02.stdout:=============================================================================== 2026-03-10T07:10:13.826 INFO:teuthology.orchestra.run.vm02.stdout:^- vps-fra9.orleans.ddnss.de 2 6 377 20 -1355us[-1344us] +/- 16ms 2026-03-10T07:10:13.826 INFO:teuthology.orchestra.run.vm02.stdout:^- ntp01.pingless.com 2 6 377 22 +798us[ +809us] +/- 13ms 2026-03-10T07:10:13.826 INFO:teuthology.orchestra.run.vm02.stdout:^- 172-104-149-161.ip.linod> 2 6 377 21 +5567us[+5578us] +/- 30ms 2026-03-10T07:10:13.826 INFO:teuthology.orchestra.run.vm02.stdout:^* static.222.16.42.77.clie> 2 6 377 20 +16us[ +27us] +/- 3079us 2026-03-10T07:10:13.827 INFO:teuthology.orchestra.run.vm05.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-10T07:10:13.827 INFO:teuthology.orchestra.run.vm05.stdout:=============================================================================== 2026-03-10T07:10:13.827 INFO:teuthology.orchestra.run.vm05.stdout:^- ntp01.pingless.com 2 6 377 22 +736us[ +736us] +/- 13ms 2026-03-10T07:10:13.827 INFO:teuthology.orchestra.run.vm05.stdout:^- 172-104-149-161.ip.linod> 2 6 377 24 +5565us[+5565us] +/- 30ms 2026-03-10T07:10:13.827 INFO:teuthology.orchestra.run.vm05.stdout:^* static.222.16.42.77.clie> 2 6 377 22 +27us[ +26us] +/- 3112us 2026-03-10T07:10:13.827 INFO:teuthology.orchestra.run.vm05.stdout:^- vps-fra9.orleans.ddnss.de 2 6 377 21 -272us[ -272us] +/- 15ms 2026-03-10T07:10:13.827 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-03-10T07:10:13.833 INFO:teuthology.task.ansible:Skipping ansible cleanup... 2026-03-10T07:10:13.834 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-03-10T07:10:13.837 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-03-10T07:10:13.840 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-03-10T07:10:13.843 INFO:teuthology.task.internal:Duration was 988.806974 seconds 2026-03-10T07:10:13.843 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-03-10T07:10:13.846 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-03-10T07:10:13.846 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-10T07:10:13.870 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-10T07:10:13.914 INFO:teuthology.orchestra.run.vm02.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-10T07:10:13.917 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-10T07:10:14.362 INFO:teuthology.task.internal.syslog:Checking logs for errors... 
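The final clock-skew check is deliberately tolerant: ntpq is tried first, is absent on these CentOS Stream 9 hosts ("command not found"), and the check falls through to chronyc; the trailing || true keeps a missing NTP client from failing the teardown. In the chronyc sources output, the ^* row is the source currently being synchronized to and ^- marks reachable sources that were not selected for combining.

    # fallback chain as issued above; purely informational
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true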
2026-03-10T07:10:14.362 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm02.local 2026-03-10T07:10:14.362 DEBUG:teuthology.orchestra.run.vm02:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-10T07:10:14.388 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm05.local 2026-03-10T07:10:14.389 DEBUG:teuthology.orchestra.run.vm05:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-10T07:10:14.413 INFO:teuthology.task.internal.syslog:Gathering journactl... 2026-03-10T07:10:14.413 DEBUG:teuthology.orchestra.run.vm02:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-10T07:10:14.431 DEBUG:teuthology.orchestra.run.vm05:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-10T07:10:15.038 INFO:teuthology.task.internal.syslog:Compressing syslogs... 
2026-03-10T07:10:15.038 DEBUG:teuthology.orchestra.run.vm02:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T07:10:15.040 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-10T07:10:15.062 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-10T07:10:15.063 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-10T07:10:15.063 INFO:teuthology.orchestra.run.vm02.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-10T07:10:15.063 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-10T07:10:15.063 INFO:teuthology.orchestra.run.vm02.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-10T07:10:15.068 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-10T07:10:15.068 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-10T07:10:15.069 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-10T07:10:15.069 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-10T07:10:15.069 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/journalctl.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-10T07:10:15.219 INFO:teuthology.orchestra.run.vm05.stderr: 97.7% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-10T07:10:15.247 INFO:teuthology.orchestra.run.vm02.stderr: 97.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-10T07:10:15.249 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-03-10T07:10:15.252 INFO:teuthology.task.internal:Restoring /etc/sudoers... 
2026-03-10T07:10:15.252 DEBUG:teuthology.orchestra.run.vm02:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-10T07:10:15.318 DEBUG:teuthology.orchestra.run.vm05:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-10T07:10:15.343 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump 2026-03-10T07:10:15.345 DEBUG:teuthology.orchestra.run.vm02:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-10T07:10:15.360 DEBUG:teuthology.orchestra.run.vm05:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-10T07:10:15.388 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern = core 2026-03-10T07:10:15.408 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = core 2026-03-10T07:10:15.419 DEBUG:teuthology.orchestra.run.vm02:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-10T07:10:15.460 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T07:10:15.460 DEBUG:teuthology.orchestra.run.vm05:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-10T07:10:15.473 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T07:10:15.474 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive 2026-03-10T07:10:15.476 INFO:teuthology.task.internal:Transferring archived files... 2026-03-10T07:10:15.476 DEBUG:teuthology.misc:Transferring archived files from vm02:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937/remote/vm02 2026-03-10T07:10:15.476 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-10T07:10:15.533 DEBUG:teuthology.misc:Transferring archived files from vm05:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/937/remote/vm05 2026-03-10T07:10:15.533 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-10T07:10:15.561 INFO:teuthology.task.internal:Removing archive directory... 2026-03-10T07:10:15.561 DEBUG:teuthology.orchestra.run.vm02:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-10T07:10:15.573 DEBUG:teuthology.orchestra.run.vm05:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-10T07:10:15.614 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload 2026-03-10T07:10:15.617 INFO:teuthology.task.internal:Not uploading archives. 2026-03-10T07:10:15.617 DEBUG:teuthology.run_tasks:Unwinding manager internal.base 2026-03-10T07:10:15.620 INFO:teuthology.task.internal:Tidying up after the test... 
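The internal.coredump unwind restores the default kernel.core_pattern, deletes any core files produced by systemd-sysusers, and removes the coredump directory if it is then empty; the follow-up test -e returning 1 on both hosts above means the directory is gone, i.e. no cores were collected during the run. Paraphrased from the command above:

    sudo sysctl -w kernel.core_pattern=core            # restore the default core pattern
    for f in $(sudo find /home/ubuntu/cephtest/archive/coredump -type f); do
        # keep real cores; drop only the ones produced by systemd-sysusers
        sudo file "$f" | grep -q systemd-sysusers && sudo rm "$f" || true
    done
    sudo rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
    test -e /home/ubuntu/cephtest/archive/coredump || echo "no coredumps collected"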
2026-03-10T07:10:15.620 DEBUG:teuthology.orchestra.run.vm02:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-10T07:10:15.629 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-10T07:10:15.644 INFO:teuthology.orchestra.run.vm02.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 07:10 /home/ubuntu/cephtest 2026-03-10T07:10:15.669 INFO:teuthology.orchestra.run.vm05.stdout: 8532139 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 10 07:10 /home/ubuntu/cephtest 2026-03-10T07:10:15.670 DEBUG:teuthology.run_tasks:Unwinding manager console_log 2026-03-10T07:10:15.675 INFO:teuthology.run:Summary data: description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic} duration: 988.8069739341736 owner: kyr success: true 2026-03-10T07:10:15.675 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-10T07:10:15.696 INFO:teuthology.run:pass