2026-03-08T23:51:26.636 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-08T23:51:26.643 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-08T23:51:26.664 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307
branch: squid
description: orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic}
email: null
first_in_suite: false
flavor: default
job_id: '307'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 1
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: false
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_STRAY_DAEMON
    - CEPHADM_FAILED_DAEMON
    - CEPHADM_AGENT_DOWN
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
seed: 8017
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch:cephadm
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm04.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPsr/gMIDJ/NDa+n6jTo4wZE9nxO4DJFPZvEtuOXxKDR85yW1WDuO2WNWek0a8MmpCyss2Lc/eNdPxaZzY41uAM=
  vm10.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEgcqVqfpwq0uL9KvQFraF9n9tHvQszD87IQjPRL/lljfNM8CwEdfa9Ba7zPDwzmP6qEllw0abrZz67RP9gSY9w=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - radosgw-admin realm create --rgw-realm=r --default
    - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
    - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
    - radosgw-admin period update --rgw-realm=r --commit
    - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000
    - ceph osd pool create foo
    - rbd pool init foo
    - ceph orch apply iscsi foo u p
    - sleep 180
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
    - ceph config set global log_to_journald false --force
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph orch ls
    - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
    - ceph orch ps --refresh
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph health detail
    - ceph versions | jq -e '.mgr | length == 2'
    - ceph mgr fail
    - sleep 180
    - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1
    - ceph orch ps --refresh
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph health detail
    - ceph -s
    - ceph mgr fail
    - sleep 180
    - ceph orch ps
    - ceph versions
    - ceph -s
    - ceph health detail
    - ceph versions | jq -e '.mgr | length == 1'
    - ceph mgr fail
    - sleep 180
    - ceph orch ps
    - ceph orch ls
    - ceph versions
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph versions | jq -e '.mgr | length == 1'
    - ceph versions | jq -e '.mgr | keys' | grep $sha1
    - ceph versions | jq -e '.overall | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '{print $2}')
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.mon | length == 2'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '{print $2}')
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.mon | length == 1'
    - ceph versions | jq -e '.mon | keys' | grep $sha1
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 5'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 2'
    - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8'
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.osd | length == 1'
    - ceph versions | jq -e '.osd | keys' | grep $sha1
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
    - ceph orch ps
    - ceph versions | jq -e '.rgw | length == 1'
    - ceph versions | jq -e '.rgw | keys' | grep $sha1
    - ceph orch upgrade status
    - ceph health detail
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
    - ceph orch ps
    - ceph versions
    - echo "wait for servicemap items w/ changing names to refresh"
    - sleep 60
    - ceph orch ps
    - ceph versions
    - ceph orch upgrade status
    - ceph health detail
    - ceph versions | jq -e '.overall | length == 1'
    - ceph versions | jq -e '.overall | keys' | grep $sha1
    - ceph orch ls | grep '^osd '
- cephadm.shell:
    mon.a:
    - ceph orch upgrade ls
    - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
    - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-08_22:22:45
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-08T23:51:26.664 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-08T23:51:26.664 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-08T23:51:26.664 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-08T23:51:26.665 INFO:teuthology.task.internal:Checking packages...
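For reference, the upgrade-wait one-liner that the task list above runs after each "ceph orch upgrade start" step, reflowed into multi-line shell purely for readability; the commands are verbatim from the task list, nothing is added:

  # Poll the orchestrator until the upgrade finishes or reports an error.
  while ceph orch upgrade status | jq '.in_progress' | grep true \
        && ! ceph orch upgrade status | jq '.message' | grep Error
  do
      ceph orch ps
      ceph versions
      ceph orch upgrade status
      sleep 30
  done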
2026-03-08T23:51:26.665 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df' 2026-03-08T23:51:26.665 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch 2026-03-08T23:51:26.665 INFO:teuthology.packaging:ref: None 2026-03-08T23:51:26.665 INFO:teuthology.packaging:tag: None 2026-03-08T23:51:26.665 INFO:teuthology.packaging:branch: squid 2026-03-08T23:51:26.665 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-08T23:51:26.665 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid 2026-03-08T23:51:27.454 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb 2026-03-08T23:51:27.455 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep... 2026-03-08T23:51:27.456 INFO:teuthology.task.internal:no buildpackages task found 2026-03-08T23:51:27.456 INFO:teuthology.run_tasks:Running task internal.save_config... 2026-03-08T23:51:27.456 INFO:teuthology.task.internal:Saving configuration 2026-03-08T23:51:27.463 INFO:teuthology.run_tasks:Running task internal.check_lock... 2026-03-08T23:51:27.464 INFO:teuthology.task.internal.check_lock:Checking locks... 2026-03-08T23:51:27.470 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm04.local', 'description': '/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-08 23:49:48.854087', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:04', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPsr/gMIDJ/NDa+n6jTo4wZE9nxO4DJFPZvEtuOXxKDR85yW1WDuO2WNWek0a8MmpCyss2Lc/eNdPxaZzY41uAM='} 2026-03-08T23:51:27.474 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm10.local', 'description': '/archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-08 23:49:48.854610', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:0a', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEgcqVqfpwq0uL9KvQFraF9n9tHvQszD87IQjPRL/lljfNM8CwEdfa9Ba7zPDwzmP6qEllw0abrZz67RP9gSY9w='} 2026-03-08T23:51:27.474 INFO:teuthology.run_tasks:Running task internal.add_remotes... 
2026-03-08T23:51:27.475 INFO:teuthology.task.internal:roles: ubuntu@vm04.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a'] 2026-03-08T23:51:27.475 INFO:teuthology.task.internal:roles: ubuntu@vm10.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b'] 2026-03-08T23:51:27.475 INFO:teuthology.run_tasks:Running task console_log... 2026-03-08T23:51:27.480 DEBUG:teuthology.task.console_log:vm04 does not support IPMI; excluding 2026-03-08T23:51:27.486 DEBUG:teuthology.task.console_log:vm10 does not support IPMI; excluding 2026-03-08T23:51:27.486 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7ff990cad480>, signals=[15]) 2026-03-08T23:51:27.486 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-08T23:51:27.487 INFO:teuthology.task.internal:Opening connections... 2026-03-08T23:51:27.487 DEBUG:teuthology.task.internal:connecting to ubuntu@vm04.local 2026-03-08T23:51:27.488 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T23:51:27.546 DEBUG:teuthology.task.internal:connecting to ubuntu@vm10.local 2026-03-08T23:51:27.547 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm10.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T23:51:27.606 INFO:teuthology.run_tasks:Running task internal.push_inventory... 2026-03-08T23:51:27.607 DEBUG:teuthology.orchestra.run.vm04:> uname -m 2026-03-08T23:51:27.654 INFO:teuthology.orchestra.run.vm04.stdout:x86_64 2026-03-08T23:51:27.654 DEBUG:teuthology.orchestra.run.vm04:> cat /etc/os-release 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:NAME="CentOS Stream" 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:VERSION="9" 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:ID="centos" 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:ID_LIKE="rhel fedora" 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:VERSION_ID="9" 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:PLATFORM_ID="platform:el9" 2026-03-08T23:51:27.711 INFO:teuthology.orchestra.run.vm04.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:ANSI_COLOR="0;31" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:LOGO="fedora-logo-icon" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:HOME_URL="https://centos.org/" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-08T23:51:27.712 INFO:teuthology.orchestra.run.vm04.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-08T23:51:27.712 INFO:teuthology.lock.ops:Updating vm04.local on lock server 2026-03-08T23:51:27.716 DEBUG:teuthology.orchestra.run.vm10:> uname -m 2026-03-08T23:51:27.730 INFO:teuthology.orchestra.run.vm10.stdout:x86_64 2026-03-08T23:51:27.731 DEBUG:teuthology.orchestra.run.vm10:> cat /etc/os-release 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:NAME="CentOS Stream" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:VERSION="9" 2026-03-08T23:51:27.784 
INFO:teuthology.orchestra.run.vm10.stdout:ID="centos" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:ID_LIKE="rhel fedora" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:VERSION_ID="9" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:PLATFORM_ID="platform:el9" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:PRETTY_NAME="CentOS Stream 9" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:ANSI_COLOR="0;31" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:LOGO="fedora-logo-icon" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:CPE_NAME="cpe:/o:centos:centos:9" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:HOME_URL="https://centos.org/" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:BUG_REPORT_URL="https://issues.redhat.com/" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9" 2026-03-08T23:51:27.784 INFO:teuthology.orchestra.run.vm10.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream" 2026-03-08T23:51:27.784 INFO:teuthology.lock.ops:Updating vm10.local on lock server 2026-03-08T23:51:27.789 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles... 2026-03-08T23:51:27.791 INFO:teuthology.run_tasks:Running task internal.check_conflict... 2026-03-08T23:51:27.792 INFO:teuthology.task.internal:Checking for old test directory... 2026-03-08T23:51:27.792 DEBUG:teuthology.orchestra.run.vm04:> test '!' -e /home/ubuntu/cephtest 2026-03-08T23:51:27.793 DEBUG:teuthology.orchestra.run.vm10:> test '!' -e /home/ubuntu/cephtest 2026-03-08T23:51:27.837 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-08T23:51:27.838 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-08T23:51:27.838 DEBUG:teuthology.orchestra.run.vm04:> test -z $(ls -A /var/lib/ceph) 2026-03-08T23:51:27.851 DEBUG:teuthology.orchestra.run.vm10:> test -z $(ls -A /var/lib/ceph) 2026-03-08T23:51:27.863 INFO:teuthology.orchestra.run.vm04.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-08T23:51:27.893 INFO:teuthology.orchestra.run.vm10.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-08T23:51:27.893 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-08T23:51:27.901 DEBUG:teuthology.orchestra.run.vm04:> test -e /ceph-qa-ready 2026-03-08T23:51:27.918 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:51:28.101 DEBUG:teuthology.orchestra.run.vm10:> test -e /ceph-qa-ready 2026-03-08T23:51:28.114 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:51:28.290 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-08T23:51:28.291 INFO:teuthology.task.internal:Creating test directory... 2026-03-08T23:51:28.291 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-08T23:51:28.293 DEBUG:teuthology.orchestra.run.vm10:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-08T23:51:28.307 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-08T23:51:28.309 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-08T23:51:28.310 INFO:teuthology.task.internal:Creating archive directory... 
2026-03-08T23:51:28.310 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-08T23:51:28.349 DEBUG:teuthology.orchestra.run.vm10:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-08T23:51:28.368 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-08T23:51:28.369 INFO:teuthology.task.internal:Enabling coredump saving... 2026-03-08T23:51:28.369 DEBUG:teuthology.orchestra.run.vm04:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-08T23:51:28.419 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:51:28.419 DEBUG:teuthology.orchestra.run.vm10:> test -f /run/.containerenv -o -f /.dockerenv 2026-03-08T23:51:28.435 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:51:28.435 DEBUG:teuthology.orchestra.run.vm04:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-08T23:51:28.461 DEBUG:teuthology.orchestra.run.vm10:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf 2026-03-08T23:51:28.486 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T23:51:28.495 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T23:51:28.500 INFO:teuthology.orchestra.run.vm10.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T23:51:28.508 INFO:teuthology.orchestra.run.vm10.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core 2026-03-08T23:51:28.509 INFO:teuthology.run_tasks:Running task internal.sudo... 2026-03-08T23:51:28.511 INFO:teuthology.task.internal:Configuring sudo... 2026-03-08T23:51:28.511 DEBUG:teuthology.orchestra.run.vm04:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-08T23:51:28.538 DEBUG:teuthology.orchestra.run.vm10:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers 2026-03-08T23:51:28.576 INFO:teuthology.run_tasks:Running task internal.syslog... 2026-03-08T23:51:28.578 INFO:teuthology.task.internal.syslog:Starting syslog monitoring... 
2026-03-08T23:51:28.579 DEBUG:teuthology.orchestra.run.vm04:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-08T23:51:28.607 DEBUG:teuthology.orchestra.run.vm10:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog 2026-03-08T23:51:28.634 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-08T23:51:28.688 DEBUG:teuthology.orchestra.run.vm04:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-08T23:51:28.747 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:51:28.747 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-08T23:51:28.808 DEBUG:teuthology.orchestra.run.vm10:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-08T23:51:28.831 DEBUG:teuthology.orchestra.run.vm10:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-08T23:51:28.887 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:51:28.887 DEBUG:teuthology.orchestra.run.vm10:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf 2026-03-08T23:51:28.944 DEBUG:teuthology.orchestra.run.vm04:> sudo service rsyslog restart 2026-03-08T23:51:28.946 DEBUG:teuthology.orchestra.run.vm10:> sudo service rsyslog restart 2026-03-08T23:51:28.972 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-08T23:51:29.009 INFO:teuthology.orchestra.run.vm10.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-08T23:51:29.352 INFO:teuthology.run_tasks:Running task internal.timer... 2026-03-08T23:51:29.353 INFO:teuthology.task.internal:Starting timer... 2026-03-08T23:51:29.353 INFO:teuthology.run_tasks:Running task pcp... 2026-03-08T23:51:29.356 INFO:teuthology.run_tasks:Running task selinux... 2026-03-08T23:51:29.359 INFO:teuthology.task.selinux:Excluding vm04: VMs are not yet supported 2026-03-08T23:51:29.359 INFO:teuthology.task.selinux:Excluding vm10: VMs are not yet supported 2026-03-08T23:51:29.359 DEBUG:teuthology.task.selinux:Getting current SELinux state 2026-03-08T23:51:29.359 DEBUG:teuthology.task.selinux:Existing SELinux modes: {} 2026-03-08T23:51:29.359 INFO:teuthology.task.selinux:Putting SELinux into permissive mode 2026-03-08T23:51:29.359 INFO:teuthology.run_tasks:Running task ansible.cephlab... 
2026-03-08T23:51:29.360 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-08T23:51:29.360 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-08T23:51:29.362 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-08T23:51:29.922 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-08T23:51:29.927 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-08T23:51:29.928 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventory868xgaje --limit vm04.local,vm10.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-08T23:53:22.400 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm04.local'), Remote(name='ubuntu@vm10.local')] 2026-03-08T23:53:22.400 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm04.local' 2026-03-08T23:53:22.400 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm04.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T23:53:22.467 DEBUG:teuthology.orchestra.run.vm04:> true 2026-03-08T23:53:22.550 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm04.local' 2026-03-08T23:53:22.550 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm10.local' 2026-03-08T23:53:22.550 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm10.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-08T23:53:22.614 DEBUG:teuthology.orchestra.run.vm10:> true 2026-03-08T23:53:22.691 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm10.local' 2026-03-08T23:53:22.691 INFO:teuthology.run_tasks:Running task clock... 2026-03-08T23:53:22.694 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
2026-03-08T23:53:22.694 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-08T23:53:22.694 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-08T23:53:22.696 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-08T23:53:22.696 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-08T23:53:22.735 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-08T23:53:22.757 INFO:teuthology.orchestra.run.vm04.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-08T23:53:22.770 INFO:teuthology.orchestra.run.vm10.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-08T23:53:22.786 INFO:teuthology.orchestra.run.vm04.stderr:sudo: ntpd: command not found 2026-03-08T23:53:22.790 INFO:teuthology.orchestra.run.vm10.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-08T23:53:22.801 INFO:teuthology.orchestra.run.vm04.stdout:506 Cannot talk to daemon 2026-03-08T23:53:22.820 INFO:teuthology.orchestra.run.vm10.stderr:sudo: ntpd: command not found 2026-03-08T23:53:22.823 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-08T23:53:22.831 INFO:teuthology.orchestra.run.vm10.stdout:506 Cannot talk to daemon 2026-03-08T23:53:22.841 INFO:teuthology.orchestra.run.vm04.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-08T23:53:22.851 INFO:teuthology.orchestra.run.vm10.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-08T23:53:22.866 INFO:teuthology.orchestra.run.vm10.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-08T23:53:22.890 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found 2026-03-08T23:53:22.892 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-08T23:53:22.892 INFO:teuthology.orchestra.run.vm04.stdout:=============================================================================== 2026-03-08T23:53:22.920 INFO:teuthology.orchestra.run.vm10.stderr:bash: line 1: ntpq: command not found 2026-03-08T23:53:22.922 INFO:teuthology.orchestra.run.vm10.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-08T23:53:22.922 INFO:teuthology.orchestra.run.vm10.stdout:=============================================================================== 2026-03-08T23:53:22.923 INFO:teuthology.run_tasks:Running task cephadm... 
2026-03-08T23:53:22.975 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': False}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON', 'CEPHADM_AGENT_DOWN'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-08T23:53:22.975 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0 2026-03-08T23:53:22.975 INFO:tasks.cephadm:Cluster fsid is fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:53:22.975 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-08T23:53:22.975 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.104', 'mon.c': '[v2:192.168.123.104:3301,v1:192.168.123.104:6790]', 'mon.b': '192.168.123.110'} 2026-03-08T23:53:22.975 INFO:tasks.cephadm:First mon is mon.a on vm04 2026-03-08T23:53:22.975 INFO:tasks.cephadm:First mgr is y 2026-03-08T23:53:22.975 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-08T23:53:22.975 DEBUG:teuthology.orchestra.run.vm04:> sudo hostname $(hostname -s) 2026-03-08T23:53:23.012 DEBUG:teuthology.orchestra.run.vm10:> sudo hostname $(hostname -s) 2026-03-08T23:53:23.051 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)... 2026-03-08T23:53:23.051 DEBUG:teuthology.orchestra.run.vm04:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-08T23:53:23.275 INFO:teuthology.orchestra.run.vm04.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 8 23:53 /home/ubuntu/cephtest/cephadm 2026-03-08T23:53:23.276 DEBUG:teuthology.orchestra.run.vm10:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-08T23:53:23.360 INFO:teuthology.orchestra.run.vm10.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 8 23:53 /home/ubuntu/cephtest/cephadm 2026-03-08T23:53:23.360 DEBUG:teuthology.orchestra.run.vm04:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-08T23:53:23.382 DEBUG:teuthology.orchestra.run.vm10:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-08T23:53:23.416 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts... 2026-03-08T23:53:23.416 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-08T23:53:23.424 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-08T23:53:23.639 INFO:teuthology.orchestra.run.vm04.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 2026-03-08T23:53:23.680 INFO:teuthology.orchestra.run.vm10.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout: "repo_digests": [ 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-03-08T23:53:55.097 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-08T23:53:55.303 INFO:teuthology.orchestra.run.vm10.stdout:{ 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout: "repo_digests": [ 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout: ] 2026-03-08T23:53:55.304 INFO:teuthology.orchestra.run.vm10.stdout:} 2026-03-08T23:53:55.338 DEBUG:teuthology.orchestra.run.vm04:> sudo mkdir -p /etc/ceph 2026-03-08T23:53:55.374 DEBUG:teuthology.orchestra.run.vm10:> sudo mkdir -p /etc/ceph 2026-03-08T23:53:55.417 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 777 /etc/ceph 2026-03-08T23:53:55.448 DEBUG:teuthology.orchestra.run.vm10:> sudo chmod 777 /etc/ceph 2026-03-08T23:53:55.491 INFO:tasks.cephadm:Writing seed config... 
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = False
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-08T23:53:55.491 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-08T23:53:55.492 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-03-08T23:53:55.492 DEBUG:teuthology.orchestra.run.vm04:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-08T23:53:55.511 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = fdcbddf6-1b49-11f1-80b0-7392062373f9
mon election default strategy = 1

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = False

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-08T23:53:55.511 DEBUG:teuthology.orchestra.run.vm04:mon.a> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service
2026-03-08T23:53:55.554 DEBUG:teuthology.orchestra.run.vm04:mgr.y> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service
2026-03-08T23:53:55.596 INFO:tasks.cephadm:Bootstrapping...
2026-03-08T23:53:55.596 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.104 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-08T23:53:55.778 INFO:teuthology.orchestra.run.vm04.stderr:--------------------------------------------------------------------------------
2026-03-08T23:53:55.778 INFO:teuthology.orchestra.run.vm04.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', 'fdcbddf6-1b49-11f1-80b0-7392062373f9', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'y', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.104', '--skip-admin-label']
2026-03-08T23:53:55.798 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: 5.8.0
2026-03-08T23:53:55.803 INFO:teuthology.orchestra.run.vm04.stderr:Verifying podman|docker is present...
2026-03-08T23:53:55.828 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: 5.8.0
2026-03-08T23:53:55.833 INFO:teuthology.orchestra.run.vm04.stderr:Verifying lvm2 is present...
2026-03-08T23:53:55.834 INFO:teuthology.orchestra.run.vm04.stderr:Verifying time synchronization is in place...
2026-03-08T23:53:55.842 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-08T23:53:55.848 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: inactive
2026-03-08T23:53:55.857 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: enabled
2026-03-08T23:53:55.866 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: active
2026-03-08T23:53:55.867 INFO:teuthology.orchestra.run.vm04.stderr:Unit chronyd.service is enabled and running
2026-03-08T23:53:55.867 INFO:teuthology.orchestra.run.vm04.stderr:Repeating the final host check...
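For readability, the cephadm bootstrap invocation logged above, reflowed with shell line continuations; image, fsid, and flags are exactly as logged, nothing is added:

  sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap \
      --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 \
      --config /home/ubuntu/cephtest/seed.ceph.conf \
      --output-config /etc/ceph/ceph.conf \
      --output-keyring /etc/ceph/ceph.client.admin.keyring \
      --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub \
      --mon-id a --mgr-id y \
      --orphan-initial-daemons --skip-monitoring-stack \
      --mon-ip 192.168.123.104 --skip-admin-label \
    && sudo chmod +r /etc/ceph/ceph.client.admin.keyring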
2026-03-08T23:53:55.894 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: 5.8.0 2026-03-08T23:53:55.898 INFO:teuthology.orchestra.run.vm04.stderr:podman (/bin/podman) version 5.8.0 is present 2026-03-08T23:53:55.898 INFO:teuthology.orchestra.run.vm04.stderr:systemctl is present 2026-03-08T23:53:55.898 INFO:teuthology.orchestra.run.vm04.stderr:lvcreate is present 2026-03-08T23:53:55.906 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory 2026-03-08T23:53:55.915 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: inactive 2026-03-08T23:53:55.925 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: enabled 2026-03-08T23:53:55.935 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: active 2026-03-08T23:53:55.935 INFO:teuthology.orchestra.run.vm04.stderr:Unit chronyd.service is enabled and running 2026-03-08T23:53:55.935 INFO:teuthology.orchestra.run.vm04.stderr:Host looks OK 2026-03-08T23:53:55.935 INFO:teuthology.orchestra.run.vm04.stderr:Cluster fsid: fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:53:55.935 INFO:teuthology.orchestra.run.vm04.stderr:Acquiring lock 139771551124544 on /run/cephadm/fdcbddf6-1b49-11f1-80b0-7392062373f9.lock 2026-03-08T23:53:55.936 INFO:teuthology.orchestra.run.vm04.stderr:Lock 139771551124544 acquired on /run/cephadm/fdcbddf6-1b49-11f1-80b0-7392062373f9.lock 2026-03-08T23:53:55.936 INFO:teuthology.orchestra.run.vm04.stderr:Verifying IP 192.168.123.104 port 3300 ... 2026-03-08T23:53:55.937 INFO:teuthology.orchestra.run.vm04.stderr:Verifying IP 192.168.123.104 port 6789 ... 2026-03-08T23:53:55.937 INFO:teuthology.orchestra.run.vm04.stderr:Base mon IP is 192.168.123.104, final addrv is [v2:192.168.123.104:3300,v1:192.168.123.104:6789] 2026-03-08T23:53:55.942 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.104 metric 100 2026-03-08T23:53:55.942 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.104 metric 100 2026-03-08T23:53:55.947 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium 2026-03-08T23:53:55.947 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-08T23:53:55.952 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-08T23:53:55.952 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: inet6 ::1/128 scope host 2026-03-08T23:53:55.952 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-08T23:53:55.952 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000 2026-03-08T23:53:55.952 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:4/64 scope link noprefixroute 2026-03-08T23:53:55.952 INFO:teuthology.orchestra.run.vm04.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-08T23:53:55.953 INFO:teuthology.orchestra.run.vm04.stderr:Mon IP `192.168.123.104` is in CIDR network `192.168.123.0/24` 2026-03-08T23:53:55.953 INFO:teuthology.orchestra.run.vm04.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-08T23:53:55.954 INFO:teuthology.orchestra.run.vm04.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
2026-03-08T23:53:55.980 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0... 2026-03-08T23:53:57.130 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Getting image source signatures 2026-03-08T23:53:57.130 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a 2026-03-08T23:53:57.130 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b 2026-03-08T23:53:57.130 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513 2026-03-08T23:53:57.130 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c 2026-03-08T23:53:57.130 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2 2026-03-08T23:53:57.131 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-08T23:53:57.134 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: Writing manifest to image destination 2026-03-08T23:53:57.139 INFO:teuthology.orchestra.run.vm04.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-08T23:53:57.293 INFO:teuthology.orchestra.run.vm04.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-08T23:53:57.379 INFO:teuthology.orchestra.run.vm04.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-08T23:53:57.379 INFO:teuthology.orchestra.run.vm04.stderr:Extracting ceph user uid/gid from container image... 2026-03-08T23:53:57.460 INFO:teuthology.orchestra.run.vm04.stderr:stat: 167 167 2026-03-08T23:53:57.478 INFO:teuthology.orchestra.run.vm04.stderr:Creating initial keys... 2026-03-08T23:53:57.579 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph-authtool: AQAVDK5poiyQIhAAk6D0/Sn1e75/i7xHtOJhXQ== 2026-03-08T23:53:57.711 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph-authtool: AQAVDK5pE/xnKhAAiDKEFnozdf2xZCbjQPHqTw== 2026-03-08T23:53:57.850 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph-authtool: AQAVDK5pKA6sMhAAxytAJGFTkQFFJejhXbN6Vg== 2026-03-08T23:53:57.871 INFO:teuthology.orchestra.run.vm04.stderr:Creating initial monmap... 
2026-03-08T23:53:57.979 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-08T23:53:57.979 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus 2026-03-08T23:53:57.979 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:53:57.979 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-08T23:53:58.015 INFO:teuthology.orchestra.run.vm04.stderr:monmaptool for a [v2:192.168.123.104:3300,v1:192.168.123.104:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-08T23:53:58.015 INFO:teuthology.orchestra.run.vm04.stderr:setting min_mon_release = octopus 2026-03-08T23:53:58.015 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/monmaptool: set fsid to fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:53:58.015 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-08T23:53:58.015 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:53:58.015 INFO:teuthology.orchestra.run.vm04.stderr:Creating mon... 2026-03-08T23:53:58.153 INFO:teuthology.orchestra.run.vm04.stderr:create mon.a on 2026-03-08T23:53:58.364 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 2026-03-08T23:53:58.556 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9.target → /etc/systemd/system/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9.target. 2026-03-08T23:53:58.556 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9.target → /etc/systemd/system/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9.target. 2026-03-08T23:53:59.075 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Failed to reset failed state of unit ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service: Unit ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service not loaded. 2026-03-08T23:53:59.092 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Created symlink /etc/systemd/system/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9.target.wants/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service → /etc/systemd/system/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@.service. 2026-03-08T23:53:59.928 INFO:teuthology.orchestra.run.vm04.stderr:firewalld does not appear to be present 2026-03-08T23:53:59.929 INFO:teuthology.orchestra.run.vm04.stderr:Not possible to enable service . firewalld.service is not available 2026-03-08T23:53:59.929 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for mon to start... 2026-03-08T23:53:59.929 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for mon... 
2026-03-08T23:54:00.149 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: cluster: 2026-03-08T23:54:00.149 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: id: fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: health: HEALTH_OK 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: services: 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon: 1 daemons, quorum a (age 0.157402s) 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mgr: no daemons active 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd: 0 osds: 0 up, 0 in 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: data: 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: pools: 0 pools, 0 pgs 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: objects: 0 objects, 0 B 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: usage: 0 B used, 0 B / 0 B avail 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: pgs: 2026-03-08T23:54:00.150 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:00.185 INFO:teuthology.orchestra.run.vm04.stderr:mon is available 2026-03-08T23:54:00.186 INFO:teuthology.orchestra.run.vm04.stderr:Assimilating anything we can from ceph.conf... 2026-03-08T23:54:00.432 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: [global] 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: fsid = fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.104:3300,v1:192.168.123.104:6789] 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: [mgr] 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mgr/cephadm/use_agent = False 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: [osd] 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-08T23:54:00.433 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-08T23:54:00.527 INFO:teuthology.orchestra.run.vm04.stderr:Generating new minimal ceph.conf... 
2026-03-08T23:54:00.763 INFO:teuthology.orchestra.run.vm04.stderr:Restarting the monitor... 2026-03-08T23:54:01.130 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 podman[46808]: 2026-03-08 23:54:01.059432251 +0000 UTC m=+0.015668966 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph:v17.2.0 2026-03-08T23:54:01.135 INFO:teuthology.orchestra.run.vm04.stderr:Setting mon public_network to 192.168.123.0/24 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 systemd[1]: Started Ceph mon.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: set uid:gid to 167:167 (ceph:ceph) 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: pidfile_write: ignore empty --pid-file 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: load: jerasure load: lrc 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: RocksDB version: 6.15.5 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Git sha rocksdb_build_git_sha:@0@ 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Compile date Apr 18 2022 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: DB SUMMARY 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: DB Session ID: UXCJFN0D1PEEBITB53DH 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: CURRENT file: CURRENT 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: IDENTITY file: IDENTITY 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: MANIFEST file: MANIFEST-000009 size: 131 Bytes 2026-03-08T23:54:01.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000008.sst 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000010.log size: 73743 ; 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.error_if_exists: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.create_if_missing: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.paranoid_checks: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.env: 0x55830dac6860 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.fs: Posix File 
System 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.info_log: 0x55830eccfdc0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_file_opening_threads: 16 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.statistics: (nil) 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.use_fsync: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_log_file_size: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.keep_log_file_num: 1000 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.recycle_log_file_num: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.allow_fallocate: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.allow_mmap_reads: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.allow_mmap_writes: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.use_direct_reads: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.create_missing_column_families: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.db_log_dir: 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-a/store.db 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.advise_random_on_open: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 
ceph-mon[46823]: rocksdb: Options.db_write_buffer_size: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.write_buffer_manager: 0x55830edc0240 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.new_table_reader_for_compaction_inputs: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.rate_limiter: (nil) 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.wal_recovery_mode: 2 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.enable_thread_tracking: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.enable_pipelined_write: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.unordered_write: 0 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-08T23:54:01.385 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.row_cache: None 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.wal_filter: None 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.allow_ingest_behind: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.preserve_deletes: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.two_write_queues: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.manual_wal_flush: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.atomic_flush: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 
2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.log_readahead_size: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.best_efforts_recovery: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.allow_data_in_errors: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.db_host_id: __hostname__ 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_background_jobs: 2 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_background_compactions: -1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_subcompactions: 1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_total_wal_size: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_open_files: -1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bytes_per_sync: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-08T23:54:01.386 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_readahead_size: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_background_flushes: -1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Compression algorithms supported: 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kZSTD supported: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kXpressCompression supported: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kLZ4HCCompression supported: 1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kLZ4Compression supported: 1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kBZip2Compression supported: 0 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kZlibCompression supported: 1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: kSnappyCompression supported: 1 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000009 2026-03-08T23:54:01.386 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.merge_operator: 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_filter: None 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_filter_factory: None 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.sst_partitioner_factory: None 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55830ec9bd00) 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: cache_index_and_filter_blocks: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: cache_index_and_filter_blocks_with_high_priority: 0 
2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: pin_top_level_index_and_filter: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: index_type: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: data_block_index_type: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: index_shortening: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: hash_index_allow_collision: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: checksum: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: no_block_cache: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache: 0x55830ed06170 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache_name: BinnedLRUCache 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache_options: 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: capacity : 536870912 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: num_shard_bits : 4 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: strict_capacity_limit : 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: high_pri_pool_ratio: 0.000 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache_compressed: (nil) 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: persistent_cache: (nil) 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_size: 4096 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_size_deviation: 10 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_restart_interval: 16 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: index_block_restart_interval: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: metadata_block_size: 4096 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: partition_filters: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: use_delta_encoding: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: whole_key_filtering: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: verify_compression: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: read_amp_bytes_per_bit: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: format_version: 4 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: enable_index_compression: 1 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout: block_align: 0 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.write_buffer_size: 33554432 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_write_buffer_number: 2 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression: NoCompression 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression: Disabled 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: 
rocksdb: Options.prefix_extractor: nullptr 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-08T23:54:01.387 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.num_levels: 7 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.level: 32767 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.strategy: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compression_opts.enabled: false 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 
vm04 ceph-mon[46823]: rocksdb: Options.target_file_size_base: 67108864 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.arena_block_size: 4194304 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.disable_auto_compactions: 0 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 
2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-08T23:54:01.388 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.table_properties_collectors: 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.inplace_update_support: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.bloom_locality: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.max_successive_merges: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.paranoid_file_checks: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.force_consistency_checks: 1 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.report_bg_io_stats: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.ttl: 2592000 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.enable_blob_files: false 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.min_blob_size: 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.blob_file_size: 268435456 2026-03-08T23:54:01.389 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 11, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 5 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/version_set.cc:4083] Creating manifest 13 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773014041171575, "job": 1, "event": "recovery_started", "wal_files": [10]} 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #10 mode 2 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 
2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773014041173490, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 14, "file_size": 70715, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 69032, "index_size": 176, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 9687, "raw_average_key_size": 49, "raw_value_size": 63601, "raw_average_value_size": 324, "num_data_blocks": 8, "num_entries": 196, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773014041, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "4b47172a-14e0-45d7-8049-a975fd5f3a1c", "db_session_id": "UXCJFN0D1PEEBITB53DH"}} 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/version_set.cc:4083] Creating manifest 15 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773014041175934, "job": 1, "event": "recovery_finished"} 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000010.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x55830ecec700 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: rocksdb: DB pointer 0x55830ed60000 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: starting mon.a rank 0 at public addrs [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] at bind addrs [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???) 
e1 preinit fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).mds e1 new map 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).mds e1 print_map 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout: e1 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-08T23:54:01.389 INFO:journalctl@ceph.mon.a.vm04.stdout: legacy client fscid: -1 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout: No filesystems configured 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expand map: {default=false} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta from 'false' to 'false' 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expanded map: {default=false} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expand map: {default=info} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta from 'info' to 'info' 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expanded map: {default=info} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expand map: {default=daemon} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta from 'daemon' to 'daemon' 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expanded map: {default=daemon} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expand map: {default=debug} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 
ceph-mon[46823]: expand_channel_meta from 'debug' to 'debug' 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: expand_channel_meta expanded map: {default=debug} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: monmap e1: 1 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0]} 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: fsmap 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: osdmap e1: 0 total, 0 up, 0 in 2026-03-08T23:54:01.390 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:01 vm04 ceph-mon[46823]: mgrmap e1: no daemons active 2026-03-08T23:54:01.390 INFO:teuthology.orchestra.run.vm04.stderr:Wrote config to /etc/ceph/ceph.conf 2026-03-08T23:54:01.390 INFO:teuthology.orchestra.run.vm04.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:01.390 INFO:teuthology.orchestra.run.vm04.stderr:Creating mgr... 2026-03-08T23:54:01.390 INFO:teuthology.orchestra.run.vm04.stderr:Verifying port 9283 ... 2026-03-08T23:54:01.578 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Failed to reset failed state of unit ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service: Unit ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service not loaded. 2026-03-08T23:54:01.588 INFO:teuthology.orchestra.run.vm04.stderr:systemctl: Created symlink /etc/systemd/system/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9.target.wants/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service → /etc/systemd/system/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@.service. 2026-03-08T23:54:01.950 INFO:teuthology.orchestra.run.vm04.stderr:firewalld does not appear to be present 2026-03-08T23:54:01.950 INFO:teuthology.orchestra.run.vm04.stderr:Not possible to enable service . firewalld.service is not available 2026-03-08T23:54:01.950 INFO:teuthology.orchestra.run.vm04.stderr:firewalld does not appear to be present 2026-03-08T23:54:01.950 INFO:teuthology.orchestra.run.vm04.stderr:Not possible to open ports <[9283]>. firewalld.service is not available 2026-03-08T23:54:01.950 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for mgr to start... 2026-03-08T23:54:01.950 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for mgr... 
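(The unit enabled just above follows cephadm's per-cluster naming scheme, ceph-<fsid>@<daemon>.service. For reference only — these commands are illustrative and not part of the captured run — the mgr unit that bootstrap created on vm04 could be inspected with:
    systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service
    journalctl -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service --since "5 min ago"
The fsid in the unit name is the one reported by the monitor earlier in this log.)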
2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsid": "fdcbddf6-1b49-11f1-80b0-7392062373f9", 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 0 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "a" 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_age": 1, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:54:02.290 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:54:02.291 
INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "available": false, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:53:59.991364+0000", 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:54:02.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:02.324 INFO:teuthology.orchestra.run.vm04.stderr:mgr not available, waiting (1/15)... 2026-03-08T23:54:02.395 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:02 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1393121212' entity='client.admin' 2026-03-08T23:54:02.395 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:02 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/3616289215' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:04.200 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:04.138+0000 7f3e541b2000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-08T23:54:04.452 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:04.356+0000 7f3e541b2000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsid": "fdcbddf6-1b49-11f1-80b0-7392062373f9", 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 0 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "a" 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_age": 3, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:04.614 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:54:04.615 
INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "available": false, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:53:59.991364+0000", 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:54:04.615 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:04.640 INFO:teuthology.orchestra.run.vm04.stderr:mgr not available, waiting (2/15)... 
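(The "mgr not available, waiting (n/15)" loop above polls `ceph status --format json-pretty` until the mgrmap reports an active manager. A minimal equivalent check against the same JSON — shown here only as a sketch, the bootstrap code does not literally shell out to jq — would be:
    ceph status --format json-pretty | jq -e '.mgrmap.available == true'
jq -e exits 0 once "available" flips to true, which happens further down when mgr.y activates and mgrmap e3 reports y(active).)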
2026-03-08T23:54:04.704 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:04.451+0000 7f3e541b2000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-08T23:54:04.704 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:04.601+0000 7f3e541b2000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:04.704 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:04.696+0000 7f3e541b2000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-08T23:54:04.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:04 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2097967146' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:04.957 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:04.756+0000 7f3e541b2000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-08T23:54:05.207 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:05.103+0000 7f3e541b2000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-08T23:54:05.207 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:05.193+0000 7f3e541b2000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:05.969 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:05.885+0000 7f3e541b2000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-08T23:54:05.970 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:05.956+0000 7f3e541b2000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-08T23:54:06.220 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:06.036+0000 7f3e541b2000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-08T23:54:06.220 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:06.181+0000 7f3e541b2000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-08T23:54:06.473 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:06.256+0000 7f3e541b2000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-08T23:54:06.473 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:06.367+0000 7f3e541b2000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-08T23:54:06.473 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:06.473+0000 7f3e541b2000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:06.950 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:06.950 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 
2026-03-08T23:54:06.950 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsid": "fdcbddf6-1b49-11f1-80b0-7392062373f9", 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 0 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "a" 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_age": 5, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_total": 0 
2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "available": false, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:53:59.991364+0000", 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:54:06.951 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:06.982 INFO:teuthology.orchestra.run.vm04.stderr:mgr not available, waiting (3/15)... 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:07.018+0000 7f3e541b2000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:07.084+0000 7f3e541b2000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2980012117' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: Activating manager daemon y 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: mgrmap e2: y(active, starting, since 0.00404125s) 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: Manager daemon y is now available 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' 2026-03-08T23:54:07.229 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:07 vm04 ceph-mon[46823]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' 2026-03-08T23:54:09.253 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:09 vm04 ceph-mon[46823]: mgrmap e3: y(active, since 1.00836s) 2026-03-08T23:54:09.288 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsid": "fdcbddf6-1b49-11f1-80b0-7392062373f9", 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "health": { 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "checks": {}, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mutes": [] 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.291 
INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum": [ 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 0 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "a" 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "quorum_age": 8, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "monmap": { 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osdmap": { 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-08T23:54:09.291 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgmap": { 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "fsmap": { 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 
"mgrmap": { 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "available": true, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modules": [ 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "iostat", 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "nfs", 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "restful" 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ], 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "servicemap": { 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "modified": "2026-03-08T23:53:59.991364+0000", 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "services": {} 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: }, 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-08T23:54:09.292 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:09.355 INFO:teuthology.orchestra.run.vm04.stderr:mgr is available 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: [global] 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: fsid = fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: [mgr] 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: [osd] 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-08T23:54:09.627 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-08T23:54:09.658 INFO:teuthology.orchestra.run.vm04.stderr:Enabling cephadm module... 2026-03-08T23:54:10.263 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:10 vm04 ceph-mon[46823]: mgrmap e4: y(active, since 2s) 2026-03-08T23:54:10.263 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:10 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/1334107004' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:10.263 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2765347664' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-08T23:54:10.263 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2765347664' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-08T23:54:10.263 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4015523670' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-08T23:54:10.777 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setuser ceph since I am not root 2026-03-08T23:54:10.777 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setgroup ceph since I am not root 2026-03-08T23:54:11.022 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:11.022 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-08T23:54:11.026 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "available": true, 2026-03-08T23:54:11.026 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-08T23:54:11.026 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-08T23:54:11.026 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:11.031 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:10.837+0000 7fe7f0489000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-08T23:54:11.031 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:10.934+0000 7fe7f0489000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-08T23:54:11.072 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for the mgr to restart... 2026-03-08T23:54:11.072 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for mgr epoch 5... 2026-03-08T23:54:11.784 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:11 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:11.542+0000 7fe7f0489000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-08T23:54:11.784 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:11 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4015523670' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-08T23:54:11.784 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:11 vm04 ceph-mon[46823]: mgrmap e5: y(active, since 3s) 2026-03-08T23:54:11.784 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:11 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/287678608' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-08T23:54:12.034 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:11 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:11.934+0000 7fe7f0489000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-08T23:54:12.289 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:12.127+0000 7fe7f0489000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-08T23:54:12.289 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:12.192+0000 7fe7f0489000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-08T23:54:12.541 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:12.373+0000 7fe7f0489000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-08T23:54:13.046 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:12.996+0000 7fe7f0489000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-08T23:54:13.297 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.195+0000 7fe7f0489000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:13.297 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.257+0000 7fe7f0489000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-08T23:54:13.548 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.316+0000 7fe7f0489000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:13.548 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.380+0000 7fe7f0489000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-08T23:54:13.548 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.444+0000 7fe7f0489000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-08T23:54:13.801 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.789+0000 7fe7f0489000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-08T23:54:14.058 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:13.873+0000 7fe7f0489000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:14.561 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:14.488+0000 7fe7f0489000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-08T23:54:14.561 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:14.551+0000 7fe7f0489000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-08T23:54:14.812 
INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:14.624+0000 7fe7f0489000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-08T23:54:14.813 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:14.759+0000 7fe7f0489000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-08T23:54:15.094 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:14.825+0000 7fe7f0489000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-08T23:54:15.094 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:14.932+0000 7fe7f0489000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-08T23:54:15.094 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:15.034+0000 7fe7f0489000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:15.598 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:15.370+0000 7fe7f0489000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-08T23:54:15.598 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:15.434+0000 7fe7f0489000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-08T23:54:15.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:15 vm04 ceph-mon[46823]: Active manager daemon y restarted 2026-03-08T23:54:15.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:15 vm04 ceph-mon[46823]: Activating manager daemon y 2026-03-08T23:54:15.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:15 vm04 ceph-mon[46823]: osdmap e2: 0 total, 0 up, 0 in 2026-03-08T23:54:16.515 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:16.515 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-08T23:54:16.515 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "initialized": true 2026-03-08T23:54:16.515 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:16.545 INFO:teuthology.orchestra.run.vm04.stderr:mgr epoch 5 is available 2026-03-08T23:54:16.546 INFO:teuthology.orchestra.run.vm04.stderr:Setting orchestrator backend to cephadm... 
2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: mgrmap e6: y(active, starting, since 0.0545811s) 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: Manager daemon y is now available 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:16 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:54:16] ENGINE Bus STARTING 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:54:16] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:54:16.606 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:54:16] ENGINE Bus STARTED 2026-03-08T23:54:17.175 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: value unchanged 
2026-03-08T23:54:17.232 INFO:teuthology.orchestra.run.vm04.stderr:Generating ssh key... 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: Generating public/private rsa key pair. 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: Your identification has been saved in /tmp/tmpqby_8raq/key. 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: Your public key has been saved in /tmp/tmpqby_8raq/key.pub. 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: The key fingerprint is: 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: SHA256:AQ3SuQuDFK7AoXzw45mN1BmNqL/hSIU1C9gC1YZHkYs ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: The key's randomart image is: 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: +---[RSA 3072]----+ 2026-03-08T23:54:17.627 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: |+=o====+ | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: |*oB+*o=o. | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: |o=*Xo+ .. | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: |.+EoX . . | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: |. o= + .S | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: | . o . | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: |. o o | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: | . 
o | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: | | 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: +----[SHA256]-----+ 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: [08/Mar/2026:23:54:16] ENGINE Bus STARTING 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: mgrmap e7: y(active, since 1.07001s) 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: [08/Mar/2026:23:54:16] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: [08/Mar/2026:23:54:16] ENGINE Bus STARTED 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:17.628 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:17.629 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:17 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:17.859 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDeYN83xvH6q3BQIfs0bSGWdk/pkE3Ip18A79tUNXHHUxi/HiEWqScRO+Xk/L58R0h+sN6hG0s0qXb8GXTZj5QnaQge9vQWss4JevCNEV50IgkjrCwPURkOQs8f7vXuyT3YMNxTW/DIRseP7E8bY/yn/GCP+VKGq9n3MNAR10PjH8gyZJWZec1VBQZEWE+ShubRoY/K9N00WkjQ0y15M9C/wm9Ml76HFHMbILb21U/2MVe3+JJFVYNVMMJ6hw5N0x4ESAYXJJEPouJNKjLXQgzMjIOuAl7DHbXkPfXunAwTUco/L8JHYTeABzoJKw3Ak0yti2P0okrk2qOxGJeeJf5YHNz6sRMRJ/AutAKwKjsy63uUN4ScMKKOKG2FSPrwXTY4srF2O8rCSt2d20IzM6Cp8rlwe2vkT6uiO5f579c8x2Rt7k5eRTOHp9pij2v+TR4V3jPtggQkiwvwdQoauW0emxuLm8z0aMG7D7MutDBUhBrqewn/H95PHiEoK+lbpSM= ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:17.914 INFO:teuthology.orchestra.run.vm04.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-08T23:54:17.914 INFO:teuthology.orchestra.run.vm04.stderr:Adding key to root@localhost authorized_keys... 2026-03-08T23:54:17.914 INFO:teuthology.orchestra.run.vm04.stderr:Adding host vm04... 
2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: Generating ssh key... 2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:18.697 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:18 vm04 ceph-mon[46823]: mgrmap e8: y(active, since 2s) 2026-03-08T23:54:18.854 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: Added host 'vm04' with addr '192.168.123.104' 2026-03-08T23:54:18.909 INFO:teuthology.orchestra.run.vm04.stderr:Deploying unmanaged mon service... 2026-03-08T23:54:19.192 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-08T23:54:19.224 INFO:teuthology.orchestra.run.vm04.stderr:Deploying unmanaged mgr service... 2026-03-08T23:54:19.492 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: Scheduled mgr update... 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm04", "addr": "192.168.123.104", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: Deploying cephadm binary to vm04 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:19.767 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:19 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:20.054 INFO:teuthology.orchestra.run.vm04.stderr:Enabling the dashboard module... 
2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: Added host vm04 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: Saving service mon spec with placement count:5 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: Saving service mgr spec with placement count:2 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2998055250' entity='client.admin' 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3911216841' entity='client.admin' 2026-03-08T23:54:20.781 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:20 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2096311575' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-08T23:54:21.298 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setuser ceph since I am not root 2026-03-08T23:54:21.299 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setgroup ceph since I am not root 2026-03-08T23:54:21.299 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:21.213+0000 7f5765012000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-08T23:54:21.299 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:21.297+0000 7f5765012000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-08T23:54:21.534 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:21.535 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-08T23:54:21.535 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "available": true, 2026-03-08T23:54:21.535 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-08T23:54:21.535 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-08T23:54:21.535 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:21.584 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for the mgr to restart... 2026-03-08T23:54:21.584 INFO:teuthology.orchestra.run.vm04.stderr:Waiting for mgr epoch 9... 2026-03-08T23:54:21.813 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:21 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:21.813 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:21 vm04 ceph-mon[46823]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:21.813 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:21 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2096311575' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-08T23:54:21.813 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:21 vm04 ceph-mon[46823]: mgrmap e9: y(active, since 5s) 2026-03-08T23:54:21.813 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:21 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1696376398' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-08T23:54:22.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:21.897+0000 7f5765012000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-08T23:54:22.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:22.278+0000 7f5765012000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-08T23:54:22.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:22.429+0000 7f5765012000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-08T23:54:22.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:22.485+0000 7f5765012000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-08T23:54:23.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:22.668+0000 7f5765012000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-08T23:54:23.534 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:23.275+0000 7f5765012000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-08T23:54:23.534 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:23.472+0000 7f5765012000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:23.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:23.534+0000 7f5765012000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-08T23:54:23.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:23.591+0000 7f5765012000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:23.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:23.654+0000 7f5765012000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-08T23:54:23.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:23.714+0000 7f5765012000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-08T23:54:24.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.025+0000 7f5765012000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-08T23:54:24.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.100+0000 
7f5765012000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:24.929 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.674+0000 7f5765012000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-08T23:54:24.929 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.738+0000 7f5765012000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-08T23:54:24.929 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.805+0000 7f5765012000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-08T23:54:25.186 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.928+0000 7f5765012000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-08T23:54:25.186 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:24.992+0000 7f5765012000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-08T23:54:25.186 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:25.093+0000 7f5765012000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-08T23:54:25.507 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:25.186+0000 7f5765012000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:25.791 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:25.507+0000 7f5765012000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-08T23:54:25.791 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:54:25.573+0000 7f5765012000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-08T23:54:25.791 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:25 vm04 ceph-mon[46823]: Active manager daemon y restarted 2026-03-08T23:54:25.791 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:25 vm04 ceph-mon[46823]: Activating manager daemon y 2026-03-08T23:54:25.791 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:25 vm04 ceph-mon[46823]: osdmap e3: 0 total, 0 up, 0 in 2026-03-08T23:54:26.095 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:54:25] ENGINE Bus STARTING 2026-03-08T23:54:26.095 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:54:26] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:54:26.095 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:54:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:54:26] ENGINE Bus STARTED 2026-03-08T23:54:26.649 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: { 2026-03-08T23:54:26.649 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 2026-03-08T23:54:26.649 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: "initialized": true 2026-03-08T23:54:26.649 
INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: } 2026-03-08T23:54:26.680 INFO:teuthology.orchestra.run.vm04.stderr:mgr epoch 9 is available 2026-03-08T23:54:26.680 INFO:teuthology.orchestra.run.vm04.stderr:Generating a dashboard self-signed certificate... 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: mgrmap e10: y(active, starting, since 0.0552115s) 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: Manager daemon y is now available 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: [08/Mar/2026:23:54:25] ENGINE Bus STARTING 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: [08/Mar/2026:23:54:26] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: [08/Mar/2026:23:54:26] ENGINE Bus STARTED 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:26.875 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:27.013 
INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: Self-signed certificate created 2026-03-08T23:54:27.046 INFO:teuthology.orchestra.run.vm04.stderr:Creating initial admin user... 2026-03-08T23:54:27.447 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$u4/z5PtjJyfYKvyVLod2y.AiqmQXJfFsSsgAebeacpOtabQbUGCAK", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773014067, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-08T23:54:27.487 INFO:teuthology.orchestra.run.vm04.stderr:Fetching dashboard port number... 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: mgrmap e11: y(active, since 1.06241s) 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:27.763 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:27.763 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: 8443 2026-03-08T23:54:27.813 INFO:teuthology.orchestra.run.vm04.stderr:firewalld does not appear to be present 2026-03-08T23:54:27.813 INFO:teuthology.orchestra.run.vm04.stderr:Not possible to open ports <[8443]>. firewalld.service is not available 2026-03-08T23:54:27.815 INFO:teuthology.orchestra.run.vm04.stderr:Ceph Dashboard is now available at: 2026-03-08T23:54:27.815 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:27.815 INFO:teuthology.orchestra.run.vm04.stderr: URL: https://vm04.local:8443/ 2026-03-08T23:54:27.815 INFO:teuthology.orchestra.run.vm04.stderr: User: admin 2026-03-08T23:54:27.815 INFO:teuthology.orchestra.run.vm04.stderr: Password: 8gfwl8bma4 2026-03-08T23:54:27.815 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:27.816 INFO:teuthology.orchestra.run.vm04.stderr:Enabling autotune for osd_memory_target 2026-03-08T23:54:28.504 INFO:teuthology.orchestra.run.vm04.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status 2026-03-08T23:54:28.715 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:28 vm04 ceph-mon[46823]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:28.715 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:28 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2936813014' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-08T23:54:28.715 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/508858986' entity='client.admin' 2026-03-08T23:54:28.731 INFO:teuthology.orchestra.run.vm04.stderr:You can access the Ceph CLI with: 2026-03-08T23:54:28.731 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr:Please consider enabling telemetry to help improve Ceph: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: ceph telemetry on 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr:For more information see: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/ 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-08T23:54:28.732 INFO:teuthology.orchestra.run.vm04.stderr:Bootstrap complete. 2026-03-08T23:54:28.768 INFO:tasks.cephadm:Fetching config... 2026-03-08T23:54:28.768 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:54:28.768 DEBUG:teuthology.orchestra.run.vm04:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-08T23:54:28.803 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-08T23:54:28.803 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:54:28.803 DEBUG:teuthology.orchestra.run.vm04:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-08T23:54:28.873 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-08T23:54:28.873 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:54:28.873 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/keyring of=/dev/stdout 2026-03-08T23:54:28.940 INFO:tasks.cephadm:Fetching pub ssh key... 2026-03-08T23:54:28.941 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:54:28.941 DEBUG:teuthology.orchestra.run.vm04:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-08T23:54:28.999 INFO:tasks.cephadm:Installing pub ssh key for root users... 
2026-03-08T23:54:28.999 DEBUG:teuthology.orchestra.run.vm04:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDeYN83xvH6q3BQIfs0bSGWdk/pkE3Ip18A79tUNXHHUxi/HiEWqScRO+Xk/L58R0h+sN6hG0s0qXb8GXTZj5QnaQge9vQWss4JevCNEV50IgkjrCwPURkOQs8f7vXuyT3YMNxTW/DIRseP7E8bY/yn/GCP+VKGq9n3MNAR10PjH8gyZJWZec1VBQZEWE+ShubRoY/K9N00WkjQ0y15M9C/wm9Ml76HFHMbILb21U/2MVe3+JJFVYNVMMJ6hw5N0x4ESAYXJJEPouJNKjLXQgzMjIOuAl7DHbXkPfXunAwTUco/L8JHYTeABzoJKw3Ak0yti2P0okrk2qOxGJeeJf5YHNz6sRMRJ/AutAKwKjsy63uUN4ScMKKOKG2FSPrwXTY4srF2O8rCSt2d20IzM6Cp8rlwe2vkT6uiO5f579c8x2Rt7k5eRTOHp9pij2v+TR4V3jPtggQkiwvwdQoauW0emxuLm8z0aMG7D7MutDBUhBrqewn/H95PHiEoK+lbpSM= ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-08T23:54:29.080 INFO:teuthology.orchestra.run.vm04.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDeYN83xvH6q3BQIfs0bSGWdk/pkE3Ip18A79tUNXHHUxi/HiEWqScRO+Xk/L58R0h+sN6hG0s0qXb8GXTZj5QnaQge9vQWss4JevCNEV50IgkjrCwPURkOQs8f7vXuyT3YMNxTW/DIRseP7E8bY/yn/GCP+VKGq9n3MNAR10PjH8gyZJWZec1VBQZEWE+ShubRoY/K9N00WkjQ0y15M9C/wm9Ml76HFHMbILb21U/2MVe3+JJFVYNVMMJ6hw5N0x4ESAYXJJEPouJNKjLXQgzMjIOuAl7DHbXkPfXunAwTUco/L8JHYTeABzoJKw3Ak0yti2P0okrk2qOxGJeeJf5YHNz6sRMRJ/AutAKwKjsy63uUN4ScMKKOKG2FSPrwXTY4srF2O8rCSt2d20IzM6Cp8rlwe2vkT6uiO5f579c8x2Rt7k5eRTOHp9pij2v+TR4V3jPtggQkiwvwdQoauW0emxuLm8z0aMG7D7MutDBUhBrqewn/H95PHiEoK+lbpSM= ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:29.092 DEBUG:teuthology.orchestra.run.vm10:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDeYN83xvH6q3BQIfs0bSGWdk/pkE3Ip18A79tUNXHHUxi/HiEWqScRO+Xk/L58R0h+sN6hG0s0qXb8GXTZj5QnaQge9vQWss4JevCNEV50IgkjrCwPURkOQs8f7vXuyT3YMNxTW/DIRseP7E8bY/yn/GCP+VKGq9n3MNAR10PjH8gyZJWZec1VBQZEWE+ShubRoY/K9N00WkjQ0y15M9C/wm9Ml76HFHMbILb21U/2MVe3+JJFVYNVMMJ6hw5N0x4ESAYXJJEPouJNKjLXQgzMjIOuAl7DHbXkPfXunAwTUco/L8JHYTeABzoJKw3Ak0yti2P0okrk2qOxGJeeJf5YHNz6sRMRJ/AutAKwKjsy63uUN4ScMKKOKG2FSPrwXTY4srF2O8rCSt2d20IzM6Cp8rlwe2vkT6uiO5f579c8x2Rt7k5eRTOHp9pij2v+TR4V3jPtggQkiwvwdQoauW0emxuLm8z0aMG7D7MutDBUhBrqewn/H95PHiEoK+lbpSM= ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-08T23:54:29.138 INFO:teuthology.orchestra.run.vm10.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDeYN83xvH6q3BQIfs0bSGWdk/pkE3Ip18A79tUNXHHUxi/HiEWqScRO+Xk/L58R0h+sN6hG0s0qXb8GXTZj5QnaQge9vQWss4JevCNEV50IgkjrCwPURkOQs8f7vXuyT3YMNxTW/DIRseP7E8bY/yn/GCP+VKGq9n3MNAR10PjH8gyZJWZec1VBQZEWE+ShubRoY/K9N00WkjQ0y15M9C/wm9Ml76HFHMbILb21U/2MVe3+JJFVYNVMMJ6hw5N0x4ESAYXJJEPouJNKjLXQgzMjIOuAl7DHbXkPfXunAwTUco/L8JHYTeABzoJKw3Ak0yti2P0okrk2qOxGJeeJf5YHNz6sRMRJ/AutAKwKjsy63uUN4ScMKKOKG2FSPrwXTY4srF2O8rCSt2d20IzM6Cp8rlwe2vkT6uiO5f579c8x2Rt7k5eRTOHp9pij2v+TR4V3jPtggQkiwvwdQoauW0emxuLm8z0aMG7D7MutDBUhBrqewn/H95PHiEoK+lbpSM= ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:29.151 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-08T23:54:29.795 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-08T23:54:29.796 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph 
orch client-keyring set client.admin '*' --mode 0755 2026-03-08T23:54:29.970 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:29 vm04 ceph-mon[46823]: mgrmap e12: y(active, since 3s) 2026-03-08T23:54:29.970 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:29.970 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:29.970 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:29.970 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:29.970 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:30.368 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm10 2026-03-08T23:54:30.368 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:54:30.368 DEBUG:teuthology.orchestra.run.vm10:> dd of=/etc/ceph/ceph.conf 2026-03-08T23:54:30.394 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:54:30.394 DEBUG:teuthology.orchestra.run.vm10:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:30.456 INFO:tasks.cephadm:Adding host vm10 to orchestrator... 2026-03-08T23:54:30.456 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch host add vm10 2026-03-08T23:54:30.818 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2308844034' entity='client.admin' 2026-03-08T23:54:30.819 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:30.819 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:30.819 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:30.819 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:30.819 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:30.819 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:31.763 INFO:teuthology.orchestra.run.vm04.stdout:Added host 'vm10' with addr '192.168.123.110' 2026-03-08T23:54:31.827 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch host ls --format=json 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm10", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:32.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:32.330 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:54:32.330 INFO:teuthology.orchestra.run.vm04.stdout:[{"addr": "192.168.123.104", "hostname": "vm04", "labels": [], "status": ""}, {"addr": "192.168.123.110", "hostname": "vm10", "labels": [], "status": ""}] 2026-03-08T23:54:32.410 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-08T23:54:32.410 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd crush tunables default 2026-03-08T23:54:33.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:33 vm04 ceph-mon[46823]: Deploying cephadm binary to vm10 2026-03-08T23:54:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:33 vm04 ceph-mon[46823]: Added host vm10 2026-03-08T23:54:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:33 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:33 vm04 ceph-mon[46823]: mgrmap e13: y(active, since 6s) 2026-03-08T23:54:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:33 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:33 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3899310880' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-08T23:54:33.356 INFO:teuthology.orchestra.run.vm04.stderr:adjusted tunables profile to default 2026-03-08T23:54:33.931 INFO:tasks.cephadm:Adding mon.a on vm04 2026-03-08T23:54:33.931 INFO:tasks.cephadm:Adding mon.c on vm04 2026-03-08T23:54:33.931 INFO:tasks.cephadm:Adding mon.b on vm10 2026-03-08T23:54:33.931 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch apply mon '3;vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b' 2026-03-08T23:54:34.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:34 vm04 ceph-mon[46823]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:54:34.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:34 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3899310880' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-08T23:54:34.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:34 vm04 ceph-mon[46823]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:34.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:34.493 INFO:teuthology.orchestra.run.vm10.stdout:Scheduled mon update... 
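The placement string passed to "ceph orch apply mon" above is a cephadm placement spec: a daemon count followed by semicolon-separated host entries of the form host:addr=name, where the address may also be a bracketed v2/v1 addrvec to pin non-default mon ports (as done for mon.c). A minimal sketch of the same pattern with hypothetical host names and addresses:

    # three mons pinned to explicit hosts/addresses; host names and IPs here are placeholders
    ceph orch apply mon '3;host1:10.0.0.1=a;host1:[v2:10.0.0.1:3301,v1:10.0.0.1:6790]=c;host2:10.0.0.2=b'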
2026-03-08T23:54:34.572 DEBUG:teuthology.orchestra.run.vm04:mon.c> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c.service 2026-03-08T23:54:34.574 DEBUG:teuthology.orchestra.run.vm10:mon.b> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.b.service 2026-03-08T23:54:34.576 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-08T23:54:34.576 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph mon dump -f json 2026-03-08T23:54:35.240 INFO:teuthology.orchestra.run.vm10.stdout: 2026-03-08T23:54:35.240 INFO:teuthology.orchestra.run.vm10.stdout:{"epoch":1,"fsid":"fdcbddf6-1b49-11f1-80b0-7392062373f9","modified":"2026-03-08T23:53:57.979597Z","created":"2026-03-08T23:53:57.979597Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-08T23:54:35.243 INFO:teuthology.orchestra.run.vm10.stderr:dumped monmap epoch 1 2026-03-08T23:54:35.703 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 systemd[1]: Starting Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:54:35.703 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:35.703 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: Saving service mon spec with placement vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b;count:3 2026-03-08T23:54:35.703 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:35.703 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:35.703 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:35.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:35.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:35.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:35.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 
vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:35.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: Deploying daemon mon.c on vm04 2026-03-08T23:54:35.704 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/2567990561' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 podman[51039]: 2026-03-08 23:54:35.704504305 +0000 UTC m=+0.020721165 container create 5c2d9165643cad1ecd1971a3d02dce7cd0119af2d8d3355b4b622e245e11dbb7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, RELEASE=HEAD, io.openshift.expose-services=, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-type=git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , distribution-scope=public, io.buildah.version=1.19.8, version=8, ceph=True) 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 podman[51039]: 2026-03-08 23:54:35.745599112 +0000 UTC m=+0.061815983 container init 5c2d9165643cad1ecd1971a3d02dce7cd0119af2d8d3355b4b622e245e11dbb7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, version=8, vcs-type=git, ceph=True, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.expose-services=, GIT_REPO=https://github.com/ceph/ceph-container.git, release=754, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, GIT_CLEAN=True, RELEASE=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , build-date=2022-05-03T08:36:31.336870, distribution-scope=public) 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 podman[51039]: 2026-03-08 23:54:35.750370161 +0000 UTC m=+0.066587032 container start 5c2d9165643cad1ecd1971a3d02dce7cd0119af2d8d3355b4b622e245e11dbb7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, vcs-type=git, ceph=True, name=centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, release=754, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 bash[51039]: 5c2d9165643cad1ecd1971a3d02dce7cd0119af2d8d3355b4b622e245e11dbb7 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 podman[51039]: 2026-03-08 23:54:35.695287684 +0000 UTC m=+0.011504565 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 systemd[1]: Started Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: set uid:gid to 167:167 (ceph:ceph) 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: pidfile_write: ignore empty --pid-file 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: load: jerasure load: lrc 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: RocksDB version: 6.15.5 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Git sha rocksdb_build_git_sha:@0@ 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Compile date Apr 18 2022 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: DB SUMMARY 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: DB Session ID: NTVQBI60PBU0DJTIHHQ1 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: CURRENT file: CURRENT 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: IDENTITY file: IDENTITY 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: MANIFEST file: MANIFEST-000003 size: 57 Bytes 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 0, files: 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000004.log size: 511 ; 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.error_if_exists: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.create_if_missing: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.paranoid_checks: 1 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: 
rocksdb: Options.env: 0x55ca4acc6860 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.fs: Posix File System 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.info_log: 0x55ca4ca75dc0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_file_opening_threads: 16 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.statistics: (nil) 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.use_fsync: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_log_file_size: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.keep_log_file_num: 1000 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.recycle_log_file_num: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.allow_fallocate: 1 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.allow_mmap_reads: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.allow_mmap_writes: 0 2026-03-08T23:54:36.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.use_direct_reads: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.create_missing_column_families: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.db_log_dir: 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-c/store.db 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 
08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.advise_random_on_open: 1 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.db_write_buffer_size: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.write_buffer_manager: 0x55ca4cb66240 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.new_table_reader_for_compaction_inputs: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.rate_limiter: (nil) 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.wal_recovery_mode: 2 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.enable_thread_tracking: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.enable_pipelined_write: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.unordered_write: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.row_cache: None 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.wal_filter: None 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.allow_ingest_behind: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.preserve_deletes: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.two_write_queues: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.manual_wal_flush: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: 
Options.atomic_flush: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.log_readahead_size: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.best_efforts_recovery: 0 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-08T23:54:36.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.allow_data_in_errors: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.db_host_id: __hostname__ 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_background_jobs: 2 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_background_compactions: -1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_subcompactions: 1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_total_wal_size: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_open_files: -1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bytes_per_sync: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.wal_bytes_per_sync: 0 
2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_readahead_size: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_background_flushes: -1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Compression algorithms supported: 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kZSTD supported: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kXpressCompression supported: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kLZ4HCCompression supported: 1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kLZ4Compression supported: 1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kBZip2Compression supported: 0 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kZlibCompression supported: 1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: kSnappyCompression supported: 1 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000003 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.merge_operator: 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_filter: None 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_filter_factory: None 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.sst_partitioner_factory: None 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-08T23:54:36.104 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55ca4ca41d00) 2026-03-08T23:54:36.105 
INFO:journalctl@ceph.mon.c.vm04.stdout: cache_index_and_filter_blocks: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: pin_top_level_index_and_filter: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: index_type: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: data_block_index_type: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: index_shortening: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: hash_index_allow_collision: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: checksum: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: no_block_cache: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache: 0x55ca4caac170 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache_name: BinnedLRUCache 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache_options: 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: capacity : 536870912 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: num_shard_bits : 4 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: strict_capacity_limit : 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: high_pri_pool_ratio: 0.000 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache_compressed: (nil) 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: persistent_cache: (nil) 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_size: 4096 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_size_deviation: 10 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_restart_interval: 16 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: index_block_restart_interval: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: metadata_block_size: 4096 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: partition_filters: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: use_delta_encoding: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: whole_key_filtering: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: verify_compression: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: read_amp_bytes_per_bit: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: format_version: 4 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: enable_index_compression: 1 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout: block_align: 0 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.write_buffer_size: 33554432 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_write_buffer_number: 2 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression: NoCompression 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 
23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression: Disabled 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.prefix_extractor: nullptr 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.num_levels: 7 2026-03-08T23:54:36.105 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.level: 32767 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.strategy: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compression_opts.enabled: false 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-08T23:54:36.106 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.target_file_size_base: 67108864 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.arena_block_size: 4194304 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.disable_auto_compactions: 0 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: 
Options.compaction_options_universal.size_ratio: 1 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-08T23:54:36.106 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.table_properties_collectors: 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.inplace_update_support: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.bloom_locality: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.max_successive_merges: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.paranoid_file_checks: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.force_consistency_checks: 1 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.report_bg_io_stats: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.ttl: 2592000 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.enable_blob_files: false 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: 
rocksdb: Options.min_blob_size: 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.blob_file_size: 268435456 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000003 succeeded,manifest_file_number is 3, next_file_number is 5, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 0 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/version_set.cc:4083] Creating manifest 7 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773014075775011, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #4 mode 2 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 
2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773014075775569, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1540, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773014075, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "684527de-bebc-40bd-96e9-188ceaba37a2", "db_session_id": "NTVQBI60PBU0DJTIHHQ1"}} 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/version_set.cc:4083] Creating manifest 9 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773014075776852, "job": 1, "event": "recovery_finished"} 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x55ca4ca92700 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: DB pointer 0x55ca4cb06000 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS ------- 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: rocksdb: [db/db_impl/db_impl.cc:903] 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout: ** DB Stats ** 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-08T23:54:36.107 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s 2026-03-08T23:54:36.108 
INFO:journalctl@ceph.mon.c.vm04.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ** Compaction Stats [default] ** 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: L0 1/0 1.50 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Sum 1/0 1.50 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ** Compaction Stats [default] ** 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative compaction: 0.00 GB write, 0.24 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval compaction: 0.00 GB write, 0.24 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ** Compaction Stats [default] ** 
2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: L0 1/0 1.50 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Sum 1/0 1.50 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ** Compaction Stats [default] ** 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.7 0.00 0.00 1 0.001 0 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative compaction: 0.00 GB write, 0.24 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:54:36.108 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c does not exist in monmap, will attempt to join an existing cluster 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: using public_addrv [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 
23:54:35 vm04 ceph-mon[51053]: starting mon.c rank -1 at public addrs [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] at bind addrs [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(???) e0 preinit fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).mds e1 new map 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).mds e1 print_map 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: e1 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: legacy client fscid: -1 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout: No filesystems configured 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e4 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).osd e4 crush map has features 288514050185494528, adjusting msgr requires 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mkfs fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 
ceph-mon[51053]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: monmap e1: 1 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0]} 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: fsmap 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: osdmap e1: 0 total, 0 up, 0 in 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e1: no daemons active 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1393121212' entity='client.admin' 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3616289215' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2097967146' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2980012117' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Activating manager daemon y 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e2: y(active, starting, since 0.00404125s) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Manager daemon y is now available 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14100 192.168.123.104:0/1394689324' entity='mgr.y' 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e3: y(active, since 1.00836s) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e4: y(active, since 2s) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1334107004' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2765347664' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2765347664' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4015523670' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4015523670' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e5: y(active, since 3s) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/287678608' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Active manager daemon y restarted 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Activating manager daemon y 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: osdmap e2: 0 total, 0 up, 0 in 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e6: y(active, starting, since 0.0545811s) 2026-03-08T23:54:36.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Manager daemon y is now available 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: [08/Mar/2026:23:54:16] ENGINE Bus STARTING 2026-03-08T23:54:36.110 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e7: y(active, since 1.07001s) 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: [08/Mar/2026:23:54:16] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: [08/Mar/2026:23:54:16] ENGINE Bus STARTED 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Generating ssh key... 
2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e8: y(active, since 2s) 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm04", "addr": "192.168.123.104", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Deploying cephadm binary to vm04 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Added host vm04 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Saving service mon spec with placement count:5 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Saving service mgr spec with placement count:2 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2998055250' entity='client.admin' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3911216841' entity='client.admin' 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/2096311575' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-08T23:54:36.110 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14120 192.168.123.104:0/1762411969' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2096311575' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e9: y(active, since 5s) 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1696376398' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Active manager daemon y restarted 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Activating manager daemon y 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: osdmap e3: 0 total, 0 up, 0 in 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e10: y(active, starting, since 0.0552115s) 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Manager daemon y is now available 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 
23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: [08/Mar/2026:23:54:25] ENGINE Bus STARTING 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: [08/Mar/2026:23:54:26] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: [08/Mar/2026:23:54:26] ENGINE Bus STARTED 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e11: y(active, since 1.06241s) 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2936813014' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/508858986' entity='client.admin' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e12: y(active, since 3s) 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2308844034' entity='client.admin' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14178 -' 
entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm10", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Deploying cephadm binary to vm10 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Added host vm10 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mgrmap e13: y(active, since 6s) 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.111 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3899310880' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3899310880' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Saving service mon spec with placement vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b;count:3 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: Deploying daemon mon.c on vm04 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: from='client.? 
192.168.123.110:0/2567990561' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing).paxosservice(auth 1..3) refresh upgraded, format 0 -> 3 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expand map: {default=false} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta from 'false' to 'false' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expanded map: {default=false} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expand map: {default=info} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta from 'info' to 'info' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expanded map: {default=info} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expand map: {default=daemon} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta from 'daemon' to 'daemon' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expanded map: {default=daemon} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expand map: {default=debug} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta from 'debug' to 'debug' 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: expand_channel_meta expanded map: {default=debug} 2026-03-08T23:54:36.112 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:35 vm04 ceph-mon[51053]: mon.c@-1(synchronizing) e1 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file 2026-03-08T23:54:36.312 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 
2026-03-08T23:54:36.312 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph mon dump -f json 2026-03-08T23:54:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:37 vm10 ceph-mon[48982]: mon.b@-1(synchronizing) e2 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file 2026-03-08T23:54:41.134 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: Deploying daemon mon.b on vm10 2026-03-08T23:54:41.134 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:41.134 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: mon.a calling monitor election 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: mon.c calling monitor election 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: monmap e2: 2 mons at 
{a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: fsmap 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: mgrmap e13: y(active, since 15s) 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: overall HEALTH_OK 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: Deploying daemon mon.b on vm10 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: mon.a calling monitor election 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: mon.c calling monitor election 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: 
dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: monmap e2: 2 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: fsmap 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: mgrmap e13: y(active, since 15s) 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:41.135 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:46.534 INFO:teuthology.orchestra.run.vm10.stdout: 2026-03-08T23:54:46.534 INFO:teuthology.orchestra.run.vm10.stdout:{"epoch":3,"fsid":"fdcbddf6-1b49-11f1-80b0-7392062373f9","modified":"2026-03-08T23:54:41.353218Z","created":"2026-03-08T23:53:57.979597Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: 
":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3300","nonce":0},{"type":"v1","addr":"192.168.123.104:6789","nonce":0}]},"addr":"192.168.123.104:6789/0","public_addr":"192.168.123.104:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:3301","nonce":0},{"type":"v1","addr":"192.168.123.104:6790","nonce":0}]},"addr":"192.168.123.104:6790/0","public_addr":"192.168.123.104:6790/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:3300","nonce":0},{"type":"v1","addr":"192.168.123.110:6789","nonce":0}]},"addr":"192.168.123.110:6789/0","public_addr":"192.168.123.110:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-08T23:54:46.537 INFO:teuthology.orchestra.run.vm10.stderr:dumped monmap epoch 3 2026-03-08T23:54:46.607 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-08T23:54:46.607 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph config generate-minimal-conf 2026-03-08T23:54:46.664 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.conf 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: mon.a calling monitor election 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: mon.c calling monitor election 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 
ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: fsmap 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: mgrmap e13: y(active, since 20s) 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: overall HEALTH_OK 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 
vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: mon.a calling monitor election 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: mon.c calling monitor election 2026-03-08T23:54:46.665 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: fsmap 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: mgrmap e13: y(active, since 20s) 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": 
"auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:46.666 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.171 INFO:teuthology.orchestra.run.vm04.stdout:# minimal ceph.conf for fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:47.171 INFO:teuthology.orchestra.run.vm04.stdout:[global] 2026-03-08T23:54:47.171 INFO:teuthology.orchestra.run.vm04.stdout: fsid = fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-08T23:54:47.171 INFO:teuthology.orchestra.run.vm04.stdout: mon_host = [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] 2026-03-08T23:54:47.236 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-08T23:54:47.236 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:54:47.236 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.conf 2026-03-08T23:54:47.263 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:54:47.263 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:47.332 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:54:47.332 DEBUG:teuthology.orchestra.run.vm10:> sudo dd of=/etc/ceph/ceph.conf 2026-03-08T23:54:47.362 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:54:47.362 DEBUG:teuthology.orchestra.run.vm10:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:47.434 INFO:tasks.cephadm:Adding mgr.y on vm04 2026-03-08T23:54:47.434 INFO:tasks.cephadm:Adding mgr.x on vm10 2026-03-08T23:54:47.434 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch apply mgr '2;vm04=y;vm10=x' 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: Reconfiguring mon.a (unknown last config time)... 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: Reconfiguring daemon mon.a on vm04 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/3176898544' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: Reconfiguring mon.c (monmap changed)... 
2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: Reconfiguring daemon mon.c on vm04 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: Reconfiguring mon.b (monmap changed)... 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: Reconfiguring daemon mon.b on vm10 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/661728680' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: Reconfiguring mon.a (unknown last config time)... 
2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: Reconfiguring daemon mon.a on vm04 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/3176898544' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: Reconfiguring mon.c (monmap changed)... 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: Reconfiguring daemon mon.c on vm04 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: Reconfiguring mon.b (monmap changed)... 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: Reconfiguring daemon mon.b on vm10 2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/661728680' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch
2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y'
2026-03-08T23:54:47.441 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-08T23:54:47.442 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-08T23:54:47.442 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-08T23:54:47.982 INFO:teuthology.orchestra.run.vm10.stdout:Scheduled mgr update...
2026-03-08T23:54:48.054 DEBUG:teuthology.orchestra.run.vm10:mgr.x> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service
2026-03-08T23:54:48.056 INFO:tasks.cephadm:Deploying OSDs...
2026-03-08T23:54:48.056 DEBUG:teuthology.orchestra.run.vm04:> set -ex
2026-03-08T23:54:48.056 DEBUG:teuthology.orchestra.run.vm04:> dd if=/scratch_devs of=/dev/stdout
2026-03-08T23:54:48.072 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-08T23:54:48.073 DEBUG:teuthology.orchestra.run.vm04:> ls /dev/[sv]d?
2026-03-08T23:54:48.131 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vda
2026-03-08T23:54:48.131 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vdb
2026-03-08T23:54:48.131 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vdc
2026-03-08T23:54:48.131 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vdd
2026-03-08T23:54:48.131 INFO:teuthology.orchestra.run.vm04.stdout:/dev/vde
2026-03-08T23:54:48.131 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-08T23:54:48.131 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-08T23:54:48.131 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vdb
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vdb
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 222 Links: 1 Device type: fc,10
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-08 23:54:29.270078517 +0000
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-08 23:54:29.020078306 +0000
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-08 23:54:29.020078306 +0000
2026-03-08T23:54:48.190 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-08 23:49:55.251000000 +0000
2026-03-08T23:54:48.190 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-08T23:54:48.257 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in
2026-03-08T23:54:48.257 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out
2026-03-08T23:54:48.257 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000183022 s, 2.8 MB/s
2026-03-08T23:54:48.258 DEBUG:teuthology.orchestra.run.vm04:> !
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-08T23:54:48.316 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vdc 2026-03-08T23:54:48.377 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vdc 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 223 Links: 1 Device type: fc,20 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-08 23:54:29.334078571 +0000 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-08 23:54:29.020078306 +0000 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-08 23:54:29.020078306 +0000 2026-03-08T23:54:48.378 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-08 23:49:55.255000000 +0000 2026-03-08T23:54:48.378 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-08T23:54:48.445 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-08T23:54:48.445 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-08T23:54:48.445 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000116348 s, 4.4 MB/s 2026-03-08T23:54:48.446 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-08T23:54:48.505 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vdd 2026-03-08T23:54:48.547 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Deploying daemon mon.b on vm10 2026-03-08T23:54:48.547 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:48.547 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.547 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.a calling monitor election 2026-03-08T23:54:48.547 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.547 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.c calling monitor election 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon 
metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: monmap e2: 2 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: fsmap 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mgrmap e13: y(active, since 15s) 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: overall HEALTH_OK 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.conf 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:54:48.548 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.a calling monitor election 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.c calling monitor election 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: fsmap 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mgrmap e13: y(active, since 20s) 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: overall HEALTH_OK 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 
192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Reconfiguring mon.a (unknown last config time)... 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Reconfiguring daemon mon.a on vm04 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/3176898544' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Reconfiguring mon.c (monmap changed)... 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Reconfiguring daemon mon.c on vm04 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Reconfiguring mon.b (monmap changed)... 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-08T23:54:48.548 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: Reconfiguring daemon mon.b on vm10 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/661728680' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:48.549 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vdd 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-08 23:54:29.405078631 +0000 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-08 23:54:29.014078301 +0000 2026-03-08T23:54:48.564 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-08 23:54:29.014078301 +0000 2026-03-08T23:54:48.565 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-08 23:49:55.272000000 +0000 2026-03-08T23:54:48.565 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-08T23:54:48.629 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-08T23:54:48.629 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-08T23:54:48.629 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000122569 s, 4.2 MB/s 2026-03-08T23:54:48.630 DEBUG:teuthology.orchestra.run.vm04:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-08T23:54:48.688 DEBUG:teuthology.orchestra.run.vm04:> stat /dev/vde 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout: File: /dev/vde 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout:Access: 2026-03-08 23:54:29.470078686 +0000 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout:Modify: 2026-03-08 23:54:29.016078302 +0000 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout:Change: 2026-03-08 23:54:29.016078302 +0000 2026-03-08T23:54:48.748 INFO:teuthology.orchestra.run.vm04.stdout: Birth: 2026-03-08 23:49:55.311000000 +0000 2026-03-08T23:54:48.748 DEBUG:teuthology.orchestra.run.vm04:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-08T23:54:48.819 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records in 2026-03-08T23:54:48.819 INFO:teuthology.orchestra.run.vm04.stderr:1+0 records out 2026-03-08T23:54:48.819 INFO:teuthology.orchestra.run.vm04.stderr:512 bytes copied, 0.000130314 s, 3.9 MB/s 2026-03-08T23:54:48.821 DEBUG:teuthology.orchestra.run.vm04:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-08T23:54:48.881 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:54:48.881 DEBUG:teuthology.orchestra.run.vm10:> dd if=/scratch_devs of=/dev/stdout 2026-03-08T23:54:48.939 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-08T23:54:48.939 DEBUG:teuthology.orchestra.run.vm10:> ls /dev/[sv]d? 
2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: mon.b calling monitor election 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: mon.b calling monitor election 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: mon.c calling monitor election 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: mon.a calling monitor election 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: fsmap 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: mgrmap e13: y(active, since 22s) 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: overall HEALTH_OK 2026-03-08T23:54:49.000 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:49.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:49.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:49.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:49.023 INFO:teuthology.orchestra.run.vm10.stdout:/dev/vda 2026-03-08T23:54:49.023 INFO:teuthology.orchestra.run.vm10.stdout:/dev/vdb 2026-03-08T23:54:49.023 INFO:teuthology.orchestra.run.vm10.stdout:/dev/vdc 2026-03-08T23:54:49.023 INFO:teuthology.orchestra.run.vm10.stdout:/dev/vdd 2026-03-08T23:54:49.023 INFO:teuthology.orchestra.run.vm10.stdout:/dev/vde 2026-03-08T23:54:49.023 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-08T23:54:49.023 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-08T23:54:49.023 DEBUG:teuthology.orchestra.run.vm10:> stat /dev/vdb 2026-03-08T23:54:49.065 INFO:teuthology.orchestra.run.vm10.stdout: File: /dev/vdb 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:49.066 
INFO:teuthology.orchestra.run.vm10.stdout:Access: 2026-03-08 23:54:34.527566474 +0000 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout:Modify: 2026-03-08 23:54:34.280280520 +0000 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout:Change: 2026-03-08 23:54:34.280280520 +0000 2026-03-08T23:54:49.066 INFO:teuthology.orchestra.run.vm10.stdout: Birth: 2026-03-08 23:50:26.237000000 +0000 2026-03-08T23:54:49.066 DEBUG:teuthology.orchestra.run.vm10:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-08T23:54:49.178 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records in 2026-03-08T23:54:49.178 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records out 2026-03-08T23:54:49.178 INFO:teuthology.orchestra.run.vm10.stderr:512 bytes copied, 0.00019781 s, 2.6 MB/s 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.b calling monitor election 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.b calling monitor election 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.c calling monitor election 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.a calling monitor election 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: fsmap 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: mgrmap e13: y(active, since 22s) 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: overall HEALTH_OK 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:49.178 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:49.179 DEBUG:teuthology.orchestra.run.vm10:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-08T23:54:49.206 DEBUG:teuthology.orchestra.run.vm10:> stat /dev/vdc 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout: File: /dev/vdc 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout:Access: 2026-03-08 23:54:34.585633621 +0000 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout:Modify: 2026-03-08 23:54:34.280280520 +0000 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout:Change: 2026-03-08 23:54:34.280280520 +0000 2026-03-08T23:54:49.267 INFO:teuthology.orchestra.run.vm10.stdout: Birth: 2026-03-08 23:50:26.240000000 +0000 2026-03-08T23:54:49.267 DEBUG:teuthology.orchestra.run.vm10:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: mon.b calling monitor election 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: mon.b calling monitor election 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: mon.c calling monitor election 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: mon.a calling monitor election 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: fsmap 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: osdmap e4: 0 total, 0 up, 0 in 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: mgrmap e13: y(active, since 22s) 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:49.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:48 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:49.387 
INFO:teuthology.orchestra.run.vm10.stderr:1+0 records in 2026-03-08T23:54:49.387 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records out 2026-03-08T23:54:49.387 INFO:teuthology.orchestra.run.vm10.stderr:512 bytes copied, 0.000193402 s, 2.6 MB/s 2026-03-08T23:54:49.388 DEBUG:teuthology.orchestra.run.vm10:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-08T23:54:49.408 DEBUG:teuthology.orchestra.run.vm10:> stat /dev/vdd 2026-03-08T23:54:49.464 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:49 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:49.464+0000 7fcdffaf9000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout: File: /dev/vdd 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout:Access: 2026-03-08 23:54:34.648706556 +0000 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout:Modify: 2026-03-08 23:54:34.287288624 +0000 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout:Change: 2026-03-08 23:54:34.287288624 +0000 2026-03-08T23:54:49.474 INFO:teuthology.orchestra.run.vm10.stdout: Birth: 2026-03-08 23:50:26.252000000 +0000 2026-03-08T23:54:49.474 DEBUG:teuthology.orchestra.run.vm10:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-08T23:54:49.538 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records in 2026-03-08T23:54:49.538 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records out 2026-03-08T23:54:49.538 INFO:teuthology.orchestra.run.vm10.stderr:512 bytes copied, 0.000166161 s, 3.1 MB/s 2026-03-08T23:54:49.539 DEBUG:teuthology.orchestra.run.vm10:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-08T23:54:49.597 DEBUG:teuthology.orchestra.run.vm10:> stat /dev/vde 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout: File: /dev/vde 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout:Access: 2026-03-08 23:54:34.731802646 +0000 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout:Modify: 2026-03-08 23:54:34.283283993 +0000 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout:Change: 2026-03-08 23:54:34.283283993 +0000 2026-03-08T23:54:49.659 INFO:teuthology.orchestra.run.vm10.stdout: Birth: 2026-03-08 23:50:26.259000000 +0000 2026-03-08T23:54:49.659 DEBUG:teuthology.orchestra.run.vm10:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-08T23:54:49.733 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records in 2026-03-08T23:54:49.733 INFO:teuthology.orchestra.run.vm10.stderr:1+0 records out 2026-03-08T23:54:49.733 INFO:teuthology.orchestra.run.vm10.stderr:512 bytes copied, 0.000166532 s, 3.1 MB/s 2026-03-08T23:54:49.734 DEBUG:teuthology.orchestra.run.vm10:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-08T23:54:49.796 INFO:tasks.cephadm:Deploying osd.0 on vm04 with /dev/vde... 2026-03-08T23:54:49.796 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vde 2026-03-08T23:54:49.995 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:49 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:49.845+0000 7fcdffaf9000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 
ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:49 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:49.995+0000 7fcdffaf9000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:50 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:50.053+0000 7fcdffaf9000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-08T23:54:50.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:50 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:50.239+0000 7fcdffaf9000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:50.350 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:50.446 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:54:50.464 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm04:/dev/vde 2026-03-08T23:54:51.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:51 vm10 ceph-mon[48982]: Reconfiguring mgr.y (unknown last config time)... 
2026-03-08T23:54:51.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:51 vm10 ceph-mon[48982]: Reconfiguring daemon mgr.y on vm04 2026-03-08T23:54:51.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:51 vm10 ceph-mon[48982]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:51.222 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:50 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:50.938+0000 7fcdffaf9000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-08T23:54:51.223 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.149+0000 7fcdffaf9000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:51.254 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:51 vm04 ceph-mon[46823]: Reconfiguring mgr.y (unknown last config time)... 2026-03-08T23:54:51.254 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:51 vm04 ceph-mon[46823]: Reconfiguring daemon mgr.y on vm04 2026-03-08T23:54:51.254 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:51 vm04 ceph-mon[46823]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:51.254 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:51 vm04 ceph-mon[51053]: Reconfiguring mgr.y (unknown last config time)... 2026-03-08T23:54:51.254 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:51 vm04 ceph-mon[51053]: Reconfiguring daemon mgr.y on vm04 2026-03-08T23:54:51.254 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:51 vm04 ceph-mon[51053]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:51.576 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.223+0000 7fcdffaf9000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-08T23:54:51.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.286+0000 7fcdffaf9000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:51.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.358+0000 7fcdffaf9000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-08T23:54:51.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.424+0000 7fcdffaf9000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-08T23:54:52.076 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='client.24100 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2343609733' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]': finished 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: osdmap e5: 1 total, 0 up, 1 in 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.765+0000 7fcdffaf9000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-08T23:54:52.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:51 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:51.847+0000 7fcdffaf9000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-08T23:54:52.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='client.24100 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:52.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:54:52.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:54:52.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2343609733' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]': finished 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: osdmap e5: 1 total, 0 up, 1 in 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='client.24100 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2343609733' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]: dispatch 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d3ce7c0b-7841-417d-8412-02f631c2946d"}]': finished 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: osdmap e5: 1 total, 0 up, 1 in 2026-03-08T23:54:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:54:52.712 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.447+0000 7fcdffaf9000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-08T23:54:52.712 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.513+0000 7fcdffaf9000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-08T23:54:52.712 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.582+0000 7fcdffaf9000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-08T23:54:52.971 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.712+0000 7fcdffaf9000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-08T23:54:52.971 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.775+0000 7fcdffaf9000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-08T23:54:52.971 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.876+0000 7fcdffaf9000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-08T23:54:53.313 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:53 vm10 ceph-mon[48982]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:53.313 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:53 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1028364531' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:54:53.313 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:52 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:52.971+0000 7fcdffaf9000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:54:53.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:53 vm04 ceph-mon[46823]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:53.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:53 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1028364531' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:54:53.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:53 vm04 ceph-mon[51053]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:53.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:53 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1028364531' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:54:53.576 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:53.313+0000 7fcdffaf9000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-08T23:54:53.576 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:54:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:54:53.375+0000 7fcdffaf9000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-08T23:54:54.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:54 vm10 ceph-mon[48982]: Standby manager daemon x started 2026-03-08T23:54:54.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:54 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-08T23:54:54.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:54 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:54:54.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:54 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-08T23:54:54.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:54 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[46823]: Standby manager daemon x started 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[51053]: from='mgr.? 
192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-08T23:54:54.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:54 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/730296303' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:54:55.020 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:55 vm04 ceph-mon[46823]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:55.020 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:55 vm04 ceph-mon[46823]: mgrmap e14: y(active, since 28s), standbys: x 2026-03-08T23:54:55.020 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:55 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-08T23:54:55.020 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:55 vm04 ceph-mon[51053]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:55.020 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:55 vm04 ceph-mon[51053]: mgrmap e14: y(active, since 28s), standbys: x 2026-03-08T23:54:55.020 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:55 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-08T23:54:55.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:55 vm10 ceph-mon[48982]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:55.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:55 vm10 ceph-mon[48982]: mgrmap e14: y(active, since 28s), standbys: x 2026-03-08T23:54:55.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:55 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-08T23:54:56.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:56 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-08T23:54:56.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:56 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:56.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:56 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-08T23:54:56.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:56 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:56.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:56 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-08T23:54:56.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:56 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:57.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:57 vm10 ceph-mon[48982]: Deploying daemon osd.0 on vm04 2026-03-08T23:54:57.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:57 vm10 ceph-mon[48982]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 
2026-03-08T23:54:57.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:57.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:57.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:57.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[46823]: Deploying daemon osd.0 on vm04 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[46823]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[51053]: Deploying daemon osd.0 on vm04 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[51053]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:57.335 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:57.533 INFO:teuthology.orchestra.run.vm04.stdout:Created osd(s) 0 on host 'vm04' 2026-03-08T23:54:57.588 DEBUG:teuthology.orchestra.run.vm04:osd.0> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.0.service 2026-03-08T23:54:57.590 INFO:tasks.cephadm:Deploying osd.1 on vm04 with /dev/vdd... 
2026-03-08T23:54:57.590 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vdd 2026-03-08T23:54:58.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[51053]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 08 23:54:58 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[54082]: 2026-03-08T23:54:58.281+0000 7f460a0323c0 -1 osd.0 0 log_to_monitors true 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:58.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:58 vm04 ceph-mon[46823]: from='osd.0 
[v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:54:58.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:58.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:54:58.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:54:58.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:54:58.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:54:58.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:58 vm10 ceph-mon[48982]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-08T23:54:59.132 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:54:59.146 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm04:/dev/vdd 2026-03-08T23:54:59.599 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 08 23:54:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[54082]: 2026-03-08T23:54:59.544+0000 7f4600a35700 -1 osd.0 0 waiting for initial osdmap 2026-03-08T23:54:59.599 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 08 23:54:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[54082]: 2026-03-08T23:54:59.551+0000 7f45fb3cc700 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: osdmap e6: 1 total, 0 up, 1 in 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": 
["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: Detected new or changed devices on vm04 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:59.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: osdmap e6: 1 total, 0 up, 1 in 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: Detected new or changed devices on vm04 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:59.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:54:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:59.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-08T23:54:59.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: osdmap e6: 1 total, 0 up, 1 in 2026-03-08T23:54:59.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:54:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:54:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: Detected new or changed devices on vm04 2026-03-08T23:54:59.827 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:54:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:54:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:54:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: osdmap e7: 1 total, 0 up, 1 in 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='client.14235 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:00.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2432398968' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]: dispatch 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]: dispatch 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434] boot 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]': finished 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: osdmap e8: 2 total, 1 up, 2 in 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:00.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: osdmap e7: 1 total, 0 up, 1 in 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='client.14235 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2432398968' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434] boot 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]': finished 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: osdmap e8: 2 total, 1 up, 2 in 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: osdmap e7: 1 total, 0 up, 1 in 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='client.14235 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2432398968' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: osd.0 [v2:192.168.123.104:6802/959618434,v1:192.168.123.104:6803/959618434] boot 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "75d29058-61cd-44da-9ebc-7516b509075d"}]': finished 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: osdmap e8: 2 total, 1 up, 2 in 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:55:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:01.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:01 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:55:01.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:01 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:55:01.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:01 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3038026589' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:01.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:01 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:55:01.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:01 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:55:01.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:01 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3038026589' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:01.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:01 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:55:01.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:01 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:55:01.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:01 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3038026589' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:02.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:02 vm10 ceph-mon[48982]: pgmap v16: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:02.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:02 vm10 ceph-mon[48982]: osdmap e9: 2 total, 1 up, 2 in 2026-03-08T23:55:02.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:02 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:02.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:02 vm04 ceph-mon[51053]: pgmap v16: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:02.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:02 vm04 ceph-mon[51053]: osdmap e9: 2 total, 1 up, 2 in 2026-03-08T23:55:02.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:02 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:02.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:02 vm04 ceph-mon[46823]: pgmap v16: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:02.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:02 vm04 ceph-mon[46823]: osdmap e9: 2 total, 1 up, 2 in 2026-03-08T23:55:02.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:02 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:04.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[46823]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[46823]: Deploying daemon osd.1 on vm04 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[51053]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:04 vm04 ceph-mon[51053]: Deploying daemon osd.1 on vm04 2026-03-08T23:55:05.076 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:04 vm10 ceph-mon[48982]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:05.076 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:04 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-08T23:55:05.076 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:04 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:05.076 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:04 vm10 ceph-mon[48982]: Deploying daemon osd.1 on vm04 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:05.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:05.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:05 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:06.076 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:05 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:06.076 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:05 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:05 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:05 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:06.238 INFO:teuthology.orchestra.run.vm04.stdout:Created osd(s) 1 on host 'vm04' 2026-03-08T23:55:06.310 DEBUG:teuthology.orchestra.run.vm04:osd.1> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.1.service 2026-03-08T23:55:06.312 INFO:tasks.cephadm:Deploying osd.2 on vm04 with /dev/vdc... 
2026-03-08T23:55:06.312 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vdc 2026-03-08T23:55:07.198 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[46823]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:07.199 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:07 vm04 ceph-mon[51053]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-08T23:55:07.199 
INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 08 23:55:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[56800]: 2026-03-08T23:55:06.925+0000 7f1597ff53c0 -1 osd.1 0 log_to_monitors true 2026-03-08T23:55:07.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:07 vm10 ceph-mon[48982]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-08T23:55:08.069 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:55:08.088 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm04:/dev/vdc 2026-03-08T23:55:08.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-08T23:55:08.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: osdmap e10: 2 total, 1 up, 2 in 2026-03-08T23:55:08.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:08.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:08.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: Detected new or changed devices on vm04 2026-03-08T23:55:08.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:08.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: from='mgr.14152 
192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:08.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:08.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:08 vm10 ceph-mon[48982]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: osdmap e10: 2 total, 1 up, 2 in 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: Detected new or changed devices on vm04 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[46823]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: osdmap e10: 2 total, 1 up, 2 in 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: Detected new or changed devices on vm04 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: from='mgr.14152 
192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:08 vm04 ceph-mon[51053]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:08.600 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 08 23:55:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[56800]: 2026-03-08T23:55:08.311+0000 7f15901fb700 -1 osd.1 0 waiting for initial osdmap 2026-03-08T23:55:08.600 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 08 23:55:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[56800]: 2026-03-08T23:55:08.338+0000 7f158b393700 -1 osd.1 11 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:55:09.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:09.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: osdmap e11: 2 total, 1 up, 2 in 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: osdmap e11: 2 total, 1 up, 2 in 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='client.14256 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:09 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='osd.1 
[v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: osdmap e11: 2 total, 1 up, 2 in 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='client.14256 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='client.14256 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:09.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:09 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:10.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: 
dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673] boot 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: osdmap e12: 2 total, 2 up, 2 in 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2927911262' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]': finished 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: osdmap e13: 3 total, 2 up, 3 in 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: pgmap v25: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3293967759' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673] boot 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: osdmap e12: 2 total, 2 up, 2 in 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2927911262' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]': finished 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: osdmap e13: 3 total, 2 up, 3 in 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: pgmap v25: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:10 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3293967759' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:10.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: osd.1 [v2:192.168.123.104:6810/1772469673,v1:192.168.123.104:6811/1772469673] boot 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: osdmap e12: 2 total, 2 up, 2 in 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2927911262' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]: dispatch 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]: dispatch 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "53adb85c-2242-4b5e-a3ed-dfb1b448b743"}]': finished 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: osdmap e13: 3 total, 2 up, 3 in 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: pgmap v25: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-08T23:55:10.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:10 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3293967759' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:11.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:11 vm10 ceph-mon[48982]: osdmap e14: 3 total, 2 up, 3 in 2026-03-08T23:55:11.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:11 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:11.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:11 vm04 ceph-mon[51053]: osdmap e14: 3 total, 2 up, 3 in 2026-03-08T23:55:11.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:11 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:11.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:11 vm04 ceph-mon[46823]: osdmap e14: 3 total, 2 up, 3 in 2026-03-08T23:55:11.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:11 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:12.549 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:12 vm04 ceph-mon[51053]: pgmap v27: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:12.549 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:12 vm04 ceph-mon[46823]: pgmap v27: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:12.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:12 vm10 ceph-mon[48982]: pgmap v27: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:13.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:13 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-08T23:55:13.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:13 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:13.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:13 vm04 ceph-mon[46823]: Deploying daemon osd.2 on vm04 2026-03-08T23:55:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:13 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-08T23:55:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:13 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:13 vm04 ceph-mon[51053]: Deploying daemon osd.2 on vm04 2026-03-08T23:55:13.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:13 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-08T23:55:13.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:13 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:13.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:13 vm10 ceph-mon[48982]: Deploying daemon osd.2 on vm04 2026-03-08T23:55:14.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:14 vm04 ceph-mon[51053]: pgmap v28: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:14.601 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:14 vm04 ceph-mon[46823]: pgmap v28: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:14.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:14 vm10 ceph-mon[48982]: pgmap v28: 0 pgs: ; 0 B data, 9.7 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:15.728 INFO:teuthology.orchestra.run.vm04.stdout:Created osd(s) 2 on host 'vm04' 2026-03-08T23:55:15.787 DEBUG:teuthology.orchestra.run.vm04:osd.2> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2.service 2026-03-08T23:55:15.788 INFO:tasks.cephadm:Deploying osd.3 on vm04 with /dev/vdb... 2026-03-08T23:55:15.788 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vdb 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:16.076 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.076 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:16.077 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:16.077 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:15 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:16.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:16.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:16.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:16.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:16.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:16.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:16.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:15 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:16.843 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 08 23:55:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[59555]: 2026-03-08T23:55:16.454+0000 7ff5ddc623c0 -1 osd.2 0 
log_to_monitors true 2026-03-08T23:55:16.843 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:16 vm04 ceph-mon[46823]: pgmap v29: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:16.843 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:16 vm04 ceph-mon[46823]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:55:17.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:16 vm04 ceph-mon[51053]: pgmap v29: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:17.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:16 vm04 ceph-mon[51053]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:55:17.326 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:16 vm10 ceph-mon[48982]: pgmap v29: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:17.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:16 vm10 ceph-mon[48982]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-08T23:55:17.521 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:55:17.539 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm04:/dev/vdb 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: Detected new or changed devices on vm04 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: osdmap e15: 3 total, 2 up, 3 in 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 
192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:18.308 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: Detected new or changed devices on vm04 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: osdmap e15: 3 total, 2 up, 3 in 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:18.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:18 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:18.309 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 08 23:55:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[59555]: 2026-03-08T23:55:18.223+0000 7ff5d5e68700 -1 osd.2 0 waiting for initial osdmap 2026-03-08T23:55:18.309 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 08 23:55:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[59555]: 2026-03-08T23:55:18.231+0000 7ff5d07ff700 -1 osd.2 16 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:55:18.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 
vm10 ceph-mon[48982]: Detected new or changed devices on vm04 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: osdmap e15: 3 total, 2 up, 3 in 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:18 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: pgmap v31: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='client.24157 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: osdmap e16: 3 total, 2 up, 3 in 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3146795815' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ac348a8b-4e4c-4ce9-84cd-4eafa34927bb"}]: dispatch 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243] boot 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3146795815' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ac348a8b-4e4c-4ce9-84cd-4eafa34927bb"}]': finished 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: osdmap e17: 4 total, 3 up, 4 in 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: pgmap v31: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='client.24157 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: osdmap e16: 3 total, 2 up, 3 in 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3146795815' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ac348a8b-4e4c-4ce9-84cd-4eafa34927bb"}]: dispatch 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243] boot 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3146795815' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ac348a8b-4e4c-4ce9-84cd-4eafa34927bb"}]': finished 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: osdmap e17: 4 total, 3 up, 4 in 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:19 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: pgmap v31: 0 pgs: ; 0 B data, 9.8 MiB used, 40 GiB / 40 GiB avail 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='client.24157 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm04:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: osdmap e16: 3 total, 2 up, 3 in 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3146795815' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ac348a8b-4e4c-4ce9-84cd-4eafa34927bb"}]: dispatch 2026-03-08T23:55:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: osd.2 [v2:192.168.123.104:6818/1494796243,v1:192.168.123.104:6819/1494796243] boot 2026-03-08T23:55:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3146795815' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ac348a8b-4e4c-4ce9-84cd-4eafa34927bb"}]': finished 2026-03-08T23:55:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: osdmap e17: 4 total, 3 up, 4 in 2026-03-08T23:55:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:55:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:19 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3483372455' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3483372455' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:20.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:20 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-08T23:55:20.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:20 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:55:20.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:20 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:55:20.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:20 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3483372455' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:20.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:20 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[51053]: pgmap v34: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[51053]: osdmap e18: 4 total, 3 up, 4 in 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[46823]: pgmap v34: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[46823]: osdmap e18: 4 total, 3 up, 4 in 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:21.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:21 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-08T23:55:21.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:21 vm10 ceph-mon[48982]: pgmap v34: 0 pgs: ; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:21.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:21 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-08T23:55:21.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:21 vm10 ceph-mon[48982]: osdmap e18: 4 total, 3 up, 4 in 2026-03-08T23:55:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:21 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:21 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' 
entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-08T23:55:22.077 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61930]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-08T23:55:22.077 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61930]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:55:22.077 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61930]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:55:22.077 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 08 23:55:21 vm04 sudo[61869]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-08T23:55:22.077 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 08 23:55:21 vm04 sudo[61869]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:55:22.077 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 08 23:55:21 vm04 sudo[61869]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:55:22.336 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61941]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-08T23:55:22.337 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61941]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:55:22.337 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61941]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:55:22.337 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:22 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-08T23:55:22.337 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:22 vm04 ceph-mon[51053]: osdmap e19: 4 total, 3 up, 4 in 2026-03-08T23:55:22.337 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:22 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:22.337 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:22 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-08T23:55:22.337 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:22 vm04 ceph-mon[46823]: osdmap e19: 4 total, 3 up, 4 in 2026-03-08T23:55:22.337 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:22 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:22.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:22 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-08T23:55:22.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:22 vm10 ceph-mon[48982]: osdmap e19: 4 total, 3 up, 4 in 2026-03-08T23:55:22.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:22 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", 
"id": 3}]: dispatch 2026-03-08T23:55:22.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61960]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-08T23:55:22.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61960]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:55:22.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:22 vm04 sudo[61960]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:55:22.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:22 vm04 sudo[62021]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-08T23:55:22.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:22 vm04 sudo[62021]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:55:22.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:22 vm04 sudo[62021]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:55:23.112 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:22 vm10 sudo[50734]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-08T23:55:23.112 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:22 vm10 sudo[50734]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-08T23:55:23.113 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:22 vm10 sudo[50734]: pam_unix(sudo:session): session closed for user root 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: pgmap v37: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: osdmap e20: 4 total, 3 up, 4 in 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 
ceph-mon[46823]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.195 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: pgmap v37: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: osdmap e20: 4 total, 3 up, 4 in 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 
192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.196 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:23 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: pgmap v37: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: osdmap e20: 4 total, 3 up, 4 in 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.577 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:55:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:23 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-08T23:55:24.308 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:24 vm04 ceph-mon[51053]: Deploying daemon osd.3 on vm04 2026-03-08T23:55:24.308 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:24 vm04 ceph-mon[46823]: Deploying daemon osd.3 on vm04 2026-03-08T23:55:24.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:24 vm10 ceph-mon[48982]: Deploying daemon osd.3 on vm04 2026-03-08T23:55:25.245 INFO:teuthology.orchestra.run.vm04.stdout:Created osd(s) 3 on host 'vm04' 2026-03-08T23:55:25.292 DEBUG:teuthology.orchestra.run.vm04:osd.3> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3.service 2026-03-08T23:55:25.293 INFO:tasks.cephadm:Deploying osd.4 on vm10 with /dev/vde... 
2026-03-08T23:55:25.293 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vde 2026-03-08T23:55:25.458 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:25 vm10 ceph-mon[48982]: pgmap v39: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:25.458 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:25 vm10 ceph-mon[48982]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-08T23:55:25.458 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:25 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:25.458 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:25 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:25.458 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:25 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:25.458 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:25 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:25.514 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[51053]: pgmap v39: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:25.514 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[51053]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-08T23:55:25.514 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:25.514 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:25.514 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:25.514 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:25.515 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[46823]: pgmap v39: 1 pgs: 1 unknown; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:25.515 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[46823]: mgrmap e15: y(active, since 58s), standbys: x 2026-03-08T23:55:25.515 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:25.515 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:25.515 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:25.515 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:25 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:26.001 INFO:teuthology.orchestra.run.vm10.stdout: 2026-03-08T23:55:26.021 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm10:/dev/vde 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:55:26.189 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:26 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:26.553 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:26.554 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:26.554 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:26.554 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:55:26.554 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:26 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:55:26.554 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 08 23:55:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[62355]: 2026-03-08T23:55:26.196+0000 7f1cda7093c0 -1 osd.3 0 log_to_monitors true 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vde", "target": 
["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: Detected new or changed devices on vm04 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:27 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:27.599 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 08 23:55:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[62355]: 2026-03-08T23:55:27.358+0000 7f1cd290f700 -1 osd.3 0 waiting for initial osdmap 2026-03-08T23:55:27.599 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 08 23:55:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[62355]: 2026-03-08T23:55:27.364+0000 7f1ccb2a2700 -1 osd.3 22 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: Detected new or changed devices on 
vm04 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='client.24155 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: Detected new or changed devices on vm04 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:27 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: osdmap e21: 4 total, 3 up, 4 in 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936]' entity='osd.3' cmd=[{"prefix": "osd crush 
create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/2524452440' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]': finished 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: osdmap e22: 5 total, 3 up, 5 in 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:28.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:28 vm10 ceph-mon[48982]: from='client.? 
192.168.123.110:0/925885773' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:28.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: osdmap e21: 4 total, 3 up, 4 in 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/2524452440' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]': finished 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: osdmap e22: 5 total, 3 up, 5 in 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[51053]: from='client.? 
192.168.123.110:0/925885773' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: osdmap e21: 4 total, 3 up, 4 in 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/2524452440' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]': finished 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ffd541f9-68f9-454d-acfc-1323f62f60a0"}]': finished 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: osdmap e22: 5 total, 3 up, 5 in 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-08T23:55:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:28 vm04 ceph-mon[46823]: from='client.? 
192.168.123.110:0/925885773' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936] boot 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: osdmap e23: 5 total, 4 up, 5 in 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:29 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936] boot 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: osdmap e23: 5 total, 4 up, 5 in 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:29.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:55:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:55:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: osd.3 [v2:192.168.123.104:6826/2755643936,v1:192.168.123.104:6827/2755643936] boot 2026-03-08T23:55:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: osdmap e23: 5 total, 4 up, 5 in 2026-03-08T23:55:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:55:29.850 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:29 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:30.445 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:30 vm10 ceph-mon[48982]: osdmap e24: 5 total, 4 up, 5 in 2026-03-08T23:55:30.445 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:30 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:30.445 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:30 vm10 ceph-mon[48982]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:30.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:30 vm04 ceph-mon[51053]: osdmap e24: 5 total, 4 up, 5 in 2026-03-08T23:55:30.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:30 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:30.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:30 vm04 ceph-mon[51053]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:30.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:30 vm04 ceph-mon[46823]: osdmap e24: 5 total, 4 up, 5 in 2026-03-08T23:55:30.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:30 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:30.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:30 vm04 ceph-mon[46823]: pgmap v46: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:31.531 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:31 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-08T23:55:31.531 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:31 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:31.531 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:31 vm10 ceph-mon[48982]: Deploying daemon osd.4 on vm10 2026-03-08T23:55:31.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:31 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-08T23:55:31.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:31 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:31.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:31 vm04 ceph-mon[51053]: Deploying daemon osd.4 on vm10 2026-03-08T23:55:31.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-08T23:55:31.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:31 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:31.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:31 vm04 ceph-mon[46823]: Deploying daemon osd.4 on vm10 2026-03-08T23:55:32.411 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:32 vm10 ceph-mon[48982]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:32.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:32 vm04 ceph-mon[46823]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:32.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:32 vm04 ceph-mon[51053]: pgmap v47: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:33.576 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:33 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:33.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:33 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:33.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:33 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:33.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:33 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:33.796 INFO:teuthology.orchestra.run.vm10.stdout:Created osd(s) 4 on host 'vm10' 2026-03-08T23:55:33.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:33.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:33.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:33.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:33.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:33.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:33.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:33.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:33 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:33.866 DEBUG:teuthology.orchestra.run.vm10:osd.4> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.4.service 2026-03-08T23:55:33.868 INFO:tasks.cephadm:Deploying osd.5 on vm10 with /dev/vdd... 
2026-03-08T23:55:33.868 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vdd 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:34.756 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:34 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:34.756 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 08 23:55:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[52482]: 2026-03-08T23:55:34.683+0000 7f9dd707b3c0 -1 osd.4 0 log_to_monitors true 2026-03-08T23:55:34.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:34.850 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:34.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:34 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:35.606 INFO:teuthology.orchestra.run.vm10.stdout: 2026-03-08T23:55:35.622 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm10:/dev/vdd 2026-03-08T23:55:35.801 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:35 vm10 ceph-mon[48982]: from='osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:55:35.802 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:35 vm10 ceph-mon[48982]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:55:35.802 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:35 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:35.802 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:35 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:35.802 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:35 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:35.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[51053]: from='osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:55:35.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[51053]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:55:35.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:35.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:35.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:35.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:35 
vm04 ceph-mon[46823]: from='osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:55:35.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[46823]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-08T23:55:35.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:35.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:35.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:35 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: Detected new or changed devices on vm10 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: Adjusting osd_memory_target on vm10 to 257.0M 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: Unable to set osd_memory_target on vm10 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: osdmap e25: 5 total, 4 up, 5 in 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:36.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:36 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:36.827 
INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 08 23:55:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[52482]: 2026-03-08T23:55:36.519+0000 7f9dcda7e700 -1 osd.4 0 waiting for initial osdmap 2026-03-08T23:55:36.827 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 08 23:55:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[52482]: 2026-03-08T23:55:36.529+0000 7f9dc9417700 -1 osd.4 26 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:55:36.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: Detected new or changed devices on vm10 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: Adjusting osd_memory_target on vm10 to 257.0M 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: Unable to set osd_memory_target on vm10 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: osdmap e25: 5 total, 4 up, 5 in 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: Detected new or changed devices on vm10 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: Adjusting osd_memory_target on vm10 to 257.0M 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: Unable to set osd_memory_target on vm10 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-08T23:55:36.850 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: osdmap e25: 5 total, 4 up, 5 in 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: pgmap v50: 1 pgs: 1 active+clean; 449 KiB data, 23 MiB used, 80 GiB / 80 GiB avail 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:36.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:36 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='client.24182 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: osdmap e26: 5 total, 4 up, 5 in 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/3944853898' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669] boot 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]': finished 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: osdmap e27: 6 total, 5 up, 6 in 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:37 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:37.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='client.24182 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:37.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:37.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: osdmap e26: 5 total, 4 up, 5 in 2026-03-08T23:55:37.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/3944853898' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669] boot 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]': finished 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: osdmap e27: 6 total, 5 up, 6 in 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='client.24182 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: osdmap e26: 5 total, 4 up, 5 in 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/3944853898' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: osd.4 [v2:192.168.123.110:6800/2012640669,v1:192.168.123.110:6801/2012640669] boot 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "5efb3808-0928-47a5-97bc-ecad3a99a5e9"}]': finished 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: osdmap e27: 6 total, 5 up, 6 in 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:55:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:37 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:38.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:38 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:55:38.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:38 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:55:38.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:38 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/2362672469' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:38.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:38 vm10 ceph-mon[48982]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:55:38.826 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:38 vm10 ceph-mon[48982]: osdmap e28: 6 total, 5 up, 6 in 2026-03-08T23:55:38.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:38 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/2362672469' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[51053]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[51053]: osdmap e28: 6 total, 5 up, 6 in 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[46823]: from='client.? 
192.168.123.110:0/2362672469' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[46823]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:55:38.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[46823]: osdmap e28: 6 total, 5 up, 6 in 2026-03-08T23:55:38.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:38 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:40.187 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:40 vm10 ceph-mon[48982]: osdmap e29: 6 total, 5 up, 6 in 2026-03-08T23:55:40.187 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:40 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:40.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:40 vm04 ceph-mon[51053]: osdmap e29: 6 total, 5 up, 6 in 2026-03-08T23:55:40.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:40 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:40.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:40 vm04 ceph-mon[46823]: osdmap e29: 6 total, 5 up, 6 in 2026-03-08T23:55:40.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:40 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:41.210 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:41 vm10 ceph-mon[48982]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:55:41.210 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:41 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-08T23:55:41.210 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:41 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:41.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:41 vm04 ceph-mon[51053]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:55:41.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:41 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-08T23:55:41.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:41 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:41.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:41 vm04 ceph-mon[46823]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-08T23:55:41.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:41 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-08T23:55:41.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:41 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: 
dispatch 2026-03-08T23:55:42.048 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:42 vm10 ceph-mon[48982]: Deploying daemon osd.5 on vm10 2026-03-08T23:55:42.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:42 vm04 ceph-mon[51053]: Deploying daemon osd.5 on vm10 2026-03-08T23:55:42.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:42 vm04 ceph-mon[46823]: Deploying daemon osd.5 on vm10 2026-03-08T23:55:43.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:43 vm10 ceph-mon[48982]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 76 KiB/s, 0 objects/s recovering 2026-03-08T23:55:43.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:43 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:43.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:43 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:43.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:43 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:43.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:43 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[51053]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 76 KiB/s, 0 objects/s recovering 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[46823]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 76 KiB/s, 0 objects/s recovering 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:43.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:43 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-08T23:55:44.030 INFO:teuthology.orchestra.run.vm10.stdout:Created osd(s) 5 on host 'vm10' 2026-03-08T23:55:44.088 DEBUG:teuthology.orchestra.run.vm10:osd.5> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.5.service 2026-03-08T23:55:44.089 INFO:tasks.cephadm:Deploying osd.6 on vm10 with /dev/vdc... 2026-03-08T23:55:44.089 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vdc 2026-03-08T23:55:44.429 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 08 23:55:44 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[55265]: 2026-03-08T23:55:44.164+0000 7ff5975193c0 -1 osd.5 0 log_to_monitors true 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 59 KiB/s, 0 objects/s recovering 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:55:45.344 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:45 vm10 ceph-mon[48982]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 59 KiB/s, 0 objects/s recovering 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='mgr.14152 
192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:45.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[51053]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 29 MiB used, 100 GiB / 100 GiB avail; 59 KiB/s, 0 objects/s recovering 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:55:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:45 vm04 ceph-mon[46823]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-08T23:55:46.368 INFO:teuthology.orchestra.run.vm10.stdout: 2026-03-08T23:55:46.381 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm10:/dev/vdc 2026-03-08T23:55:46.956 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:46 vm10 ceph-mon[48982]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-08T23:55:46.956 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:46 vm10 ceph-mon[48982]: osdmap e30: 6 total, 5 up, 6 in 2026-03-08T23:55:46.956 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:46 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:46.956 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:46 vm10 ceph-mon[48982]: from='osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:46.956 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:46 vm10 ceph-mon[48982]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:46.956 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:46 vm10 ceph-mon[48982]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 51 KiB/s, 0 objects/s recovering 2026-03-08T23:55:46.956 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 08 23:55:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[55265]: 2026-03-08T23:55:46.739+0000 7ff58df1c700 -1 osd.5 0 waiting for initial osdmap 2026-03-08T23:55:46.956 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 08 23:55:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[55265]: 2026-03-08T23:55:46.753+0000 7ff5880b2700 -1 osd.5 31 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:55:47.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[51053]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-08T23:55:47.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[51053]: osdmap e30: 6 total, 5 up, 6 in 2026-03-08T23:55:47.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:47.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[51053]: from='osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:47.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[51053]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:47.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[51053]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 51 KiB/s, 0 objects/s recovering 2026-03-08T23:55:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[46823]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-08T23:55:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[46823]: osdmap e30: 6 total, 5 up, 6 in 2026-03-08T23:55:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 
2026-03-08T23:55:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[46823]: from='osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[46823]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:46 vm04 ceph-mon[46823]: pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 51 KiB/s, 0 objects/s recovering 2026-03-08T23:55:47.744 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:55:47.744 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:55:47.744 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: osdmap e31: 6 total, 5 up, 6 in 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:47.745 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:47 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:55:48.099 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: osdmap e31: 6 total, 5 up, 6 in 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:48.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: osdmap e31: 6 total, 5 up, 6 in 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:48.100 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:47 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: Detected new or changed devices on vm10 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: Adjusting osd_memory_target on vm10 to 128.5M 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: Unable to set osd_memory_target on vm10 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691] boot 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: osdmap e32: 6 total, 6 up, 6 in 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/1114636285' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e5c2cd5a-74db-44b2-8a4f-525ffaba40f9"}]: dispatch 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: from='client.? 
192.168.123.110:0/1114636285' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e5c2cd5a-74db-44b2-8a4f-525ffaba40f9"}]': finished 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: osdmap e33: 7 total, 6 up, 7 in 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:48 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/940689676' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:49.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: Detected new or changed devices on vm10 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: Adjusting osd_memory_target on vm10 to 128.5M 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: Unable to set osd_memory_target on vm10 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691] boot 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: osdmap e32: 6 total, 6 up, 6 in 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/1114636285' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e5c2cd5a-74db-44b2-8a4f-525ffaba40f9"}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/1114636285' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e5c2cd5a-74db-44b2-8a4f-525ffaba40f9"}]': finished 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: osdmap e33: 7 total, 6 up, 7 in 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[51053]: from='client.? 
192.168.123.110:0/940689676' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: from='client.24209 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: Detected new or changed devices on vm10 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: Adjusting osd_memory_target on vm10 to 128.5M 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: Unable to set osd_memory_target on vm10 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 49 KiB/s, 0 objects/s recovering 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: osd.5 [v2:192.168.123.110:6808/614383691,v1:192.168.123.110:6809/614383691] boot 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: osdmap e32: 6 total, 6 up, 6 in 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/1114636285' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "e5c2cd5a-74db-44b2-8a4f-525ffaba40f9"}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/1114636285' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "e5c2cd5a-74db-44b2-8a4f-525ffaba40f9"}]': finished 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: osdmap e33: 7 total, 6 up, 7 in 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:48 vm04 ceph-mon[46823]: from='client.? 
192.168.123.110:0/940689676' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:55:50.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:50 vm10 ceph-mon[48982]: osdmap e34: 7 total, 6 up, 7 in 2026-03-08T23:55:50.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:50 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:50.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:50 vm10 ceph-mon[48982]: pgmap v66: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:50.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:50 vm04 ceph-mon[51053]: osdmap e34: 7 total, 6 up, 7 in 2026-03-08T23:55:50.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:50 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:50.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:50 vm04 ceph-mon[51053]: pgmap v66: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:50.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:50 vm04 ceph-mon[46823]: osdmap e34: 7 total, 6 up, 7 in 2026-03-08T23:55:50.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:50 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:50.599 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:50 vm04 ceph-mon[46823]: pgmap v66: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:52.599 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[51053]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[51053]: Deploying daemon osd.6 on vm10 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[46823]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:52.633 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:52 vm04 ceph-mon[46823]: Deploying daemon osd.6 on vm10 2026-03-08T23:55:52.772 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:52 vm10 ceph-mon[48982]: pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:52.772 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-08T23:55:52.772 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:52 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:52.772 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:52 vm10 ceph-mon[48982]: Deploying daemon osd.6 on vm10 2026-03-08T23:55:55.040 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:54 vm10 ceph-mon[48982]: pgmap v68: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:55.040 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:54 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:55.040 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:54 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:55.040 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:54 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:55.040 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:54 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:55.099 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[51053]: pgmap v68: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[46823]: pgmap v68: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:55.133 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:54 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:56.572 
INFO:teuthology.orchestra.run.vm10.stdout:Created osd(s) 6 on host 'vm10' 2026-03-08T23:55:56.637 DEBUG:teuthology.orchestra.run.vm10:osd.6> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6.service 2026-03-08T23:55:56.639 INFO:tasks.cephadm:Deploying osd.7 on vm10 with /dev/vdb... 2026-03-08T23:55:56.639 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- lvm zap /dev/vdb 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: pgmap v69: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:57 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:57.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: pgmap v69: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: 
from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: pgmap v69: 1 pgs: 1 active+recovering; 449 KiB data, 35 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:57 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:55:58.495 INFO:teuthology.orchestra.run.vm10.stdout: 2026-03-08T23:55:58.510 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch daemon add osd vm10:/dev/vdb 2026-03-08T23:55:58.680 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 08 23:55:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[58006]: 2026-03-08T23:55:58.482+0000 7f719af4e700 -1 osd.6 0 waiting for initial osdmap 2026-03-08T23:55:58.680 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 08 23:55:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[58006]: 2026-03-08T23:55:58.492+0000 7f71950e4700 -1 osd.6 36 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:55:58.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: 
from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-08T23:55:58.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: osdmap e35: 7 total, 6 up, 7 in 2026-03-08T23:55:58.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:58.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:58.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: Detected new or changed devices on vm10 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: Adjusting osd_memory_target on vm10 to 87739k 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: Unable to set osd_memory_target on vm10 to 89845486: error parsing value: Value '89845486' is below minimum 939524096 2026-03-08T23:55:58.681 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:58 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: osdmap e35: 7 total, 6 up, 7 in 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276]' 
entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: Detected new or changed devices on vm10 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: Adjusting osd_memory_target on vm10 to 87739k 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: Unable to set osd_memory_target on vm10 to 89845486: error parsing value: Value '89845486' is below minimum 939524096 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: osdmap e35: 7 total, 6 up, 7 in 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: pgmap v71: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 
vm04 ceph-mon[46823]: Detected new or changed devices on vm10 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: Adjusting osd_memory_target on vm10 to 87739k 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: Unable to set osd_memory_target on vm10 to 89845486: error parsing value: Value '89845486' is below minimum 939524096 2026-03-08T23:55:58.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:58 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: osdmap e36: 7 total, 6 up, 7 in 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='client.24236 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276] boot 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: osdmap e37: 7 
total, 7 up, 7 in 2026-03-08T23:55:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:55:59 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:59.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: osdmap e36: 7 total, 6 up, 7 in 2026-03-08T23:55:59.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='client.24236 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276] boot 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: osdmap e37: 7 total, 7 up, 7 in 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: osdmap e36: 7 total, 6 up, 7 in 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='client.24236 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm10:/dev/vdb", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: osd.6 [v2:192.168.123.110:6816/2965253276,v1:192.168.123.110:6817/2965253276] boot 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: osdmap e37: 7 total, 7 up, 7 in 2026-03-08T23:55:59.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:55:59 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/1438839718' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]: dispatch 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]: dispatch 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]': finished 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: osdmap e38: 8 total, 7 up, 8 in 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:00 vm10 ceph-mon[48982]: from='client.? 
192.168.123.110:0/1360146690' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:56:00.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:56:00.849 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/1438839718' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]': finished 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: osdmap e38: 8 total, 7 up, 8 in 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/1360146690' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/1438839718' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c2a6868a-a44a-4a09-a55c-d1145ef3d398"}]': finished 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: osdmap e38: 8 total, 7 up, 8 in 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:00 vm04 ceph-mon[46823]: from='client.? 
192.168.123.110:0/1360146690' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-08T23:56:02.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:01 vm10 ceph-mon[48982]: osdmap e39: 8 total, 7 up, 8 in 2026-03-08T23:56:02.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:01 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:02.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:01 vm04 ceph-mon[51053]: osdmap e39: 8 total, 7 up, 8 in 2026-03-08T23:56:02.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:01 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:02.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:01 vm04 ceph-mon[46823]: osdmap e39: 8 total, 7 up, 8 in 2026-03-08T23:56:02.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:01 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:03.009 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:02 vm10 ceph-mon[48982]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 80 KiB/s, 0 objects/s recovering 2026-03-08T23:56:03.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:02 vm04 ceph-mon[51053]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 80 KiB/s, 0 objects/s recovering 2026-03-08T23:56:03.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:02 vm04 ceph-mon[46823]: pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail; 80 KiB/s, 0 objects/s recovering 2026-03-08T23:56:04.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:03 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-08T23:56:04.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:03 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:04.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:03 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-08T23:56:04.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:03 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:04.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:03 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-08T23:56:04.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:03 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:05.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:04 vm10 ceph-mon[48982]: Deploying daemon osd.7 on vm10 2026-03-08T23:56:05.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:04 vm10 ceph-mon[48982]: pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:05.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:04 vm04 ceph-mon[51053]: Deploying 
daemon osd.7 on vm10 2026-03-08T23:56:05.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:04 vm04 ceph-mon[51053]: pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:05.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:04 vm04 ceph-mon[46823]: Deploying daemon osd.7 on vm10 2026-03-08T23:56:05.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:04 vm04 ceph-mon[46823]: pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:06.468 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:06 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:06.468 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:06 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:06.468 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:06 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:06.468 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:06 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:06.468 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:06 vm10 ceph-mon[48982]: pgmap v79: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:06.723 INFO:teuthology.orchestra.run.vm10.stdout:Created osd(s) 7 on host 'vm10' 2026-03-08T23:56:06.791 DEBUG:teuthology.orchestra.run.vm10:osd.7> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.7.service 2026-03-08T23:56:06.793 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
2026-03-08T23:56:06.793 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd stat -f json 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[46823]: pgmap v79: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:06.815 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:06 vm04 ceph-mon[51053]: pgmap v79: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:07.288 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:07.334 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":39,"num_osds":8,"num_up_osds":7,"osd_up_since":1773014159,"num_in_osds":8,"osd_in_since":1773014159,"num_remapped_pgs":0} 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:07.575 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2259661790' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:56:07.575 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[46823]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/2259661790' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:07 vm10 ceph-mon[48982]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:56:07.827 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 08 23:56:07 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[60773]: 2026-03-08T23:56:07.354+0000 7f4c316d03c0 -1 osd.7 0 log_to_monitors true 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/2259661790' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:56:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:07 vm04 ceph-mon[51053]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-08T23:56:08.335 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd stat -f json 2026-03-08T23:56:08.858 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: pgmap v80: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: osdmap e40: 8 total, 7 up, 8 in 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: Detected new or changed devices on vm10 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": 
"osd_memory_target"}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: Adjusting osd_memory_target on vm10 to 65804k 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: Unable to set osd_memory_target on vm10 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: pgmap v80: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: osdmap e40: 8 total, 7 up, 8 in 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: Detected new or changed devices on vm10 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:08.871 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.872 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.872 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.872 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:08.872 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: Adjusting osd_memory_target on vm10 to 65804k 2026-03-08T23:56:08.872 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: Unable to set osd_memory_target on vm10 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 
2026-03-08T23:56:08.872 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:08 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:08.930 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":41,"num_osds":8,"num_up_osds":7,"osd_up_since":1773014159,"num_in_osds":8,"osd_in_since":1773014159,"num_remapped_pgs":0} 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: pgmap v80: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: osdmap e40: 8 total, 7 up, 8 in 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: Detected new or changed devices on vm10 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: Adjusting osd_memory_target on vm10 to 65804k 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: Unable to set osd_memory_target on vm10 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-08T23:56:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:08 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:09.077 
INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 08 23:56:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[60773]: 2026-03-08T23:56:08.733+0000 7f4c280d3700 -1 osd.7 0 waiting for initial osdmap 2026-03-08T23:56:09.077 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 08 23:56:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[60773]: 2026-03-08T23:56:08.747+0000 7f4c24a6e700 -1 osd.7 41 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-08T23:56:09.932 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd stat -f json 2026-03-08T23:56:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:09 vm10 ceph-mon[48982]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:56:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:09 vm10 ceph-mon[48982]: osdmap e41: 8 total, 7 up, 8 in 2026-03-08T23:56:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:09 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:09 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4114119299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[51053]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[51053]: osdmap e41: 8 total, 7 up, 8 in 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4114119299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[46823]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]': finished 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[46823]: osdmap e41: 8 total, 7 up, 8 in 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:10.094 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:09 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/4114119299' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:10.395 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:10.443 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":42,"num_osds":8,"num_up_osds":8,"osd_up_since":1773014169,"num_in_osds":8,"osd_in_since":1773014159,"num_remapped_pgs":1} 2026-03-08T23:56:10.444 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd dump --format=json 2026-03-08T23:56:10.602 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: purged_snaps scrub starts 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: purged_snaps scrub ok 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: pgmap v83: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559] boot 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: osdmap e42: 8 total, 8 up, 8 in 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1810887184' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[46823]: osdmap e43: 8 total, 8 up, 8 in 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: purged_snaps scrub starts 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: purged_snaps scrub ok 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: pgmap v83: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559] boot 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: osdmap e42: 8 total, 8 up, 8 in 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1810887184' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:10.980 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:10 vm04 ceph-mon[51053]: osdmap e43: 8 total, 8 up, 8 in 2026-03-08T23:56:10.981 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:10.981 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":43,"fsid":"fdcbddf6-1b49-11f1-80b0-7392062373f9","created":"2026-03-08T23:53:59.990761+0000","modified":"2026-03-08T23:56:10.734262+0000","last_up_change":"2026-03-08T23:56:09.724834+0000","last_in_change":"2026-03-08T23:55:59.871620+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-08T23:55:19.841597+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"d3ce7c0b-7841-417d-8412-02f631c2946d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6803","nonce":959618434}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6805","nonce":959618434}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6809","nonce":959618434}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6807","nonce":959618434}]},"public_addr":"192.168.123.104:6803/959618434","cluster_addr":"192.168.123.104:6805/959618434","h
eartbeat_back_addr":"192.168.123.104:6809/959618434","heartbeat_front_addr":"192.168.123.104:6807/959618434","state":["exists","up"]},{"osd":1,"uuid":"75d29058-61cd-44da-9ebc-7516b509075d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6811","nonce":1772469673}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6813","nonce":1772469673}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6817","nonce":1772469673}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6815","nonce":1772469673}]},"public_addr":"192.168.123.104:6811/1772469673","cluster_addr":"192.168.123.104:6813/1772469673","heartbeat_back_addr":"192.168.123.104:6817/1772469673","heartbeat_front_addr":"192.168.123.104:6815/1772469673","state":["exists","up"]},{"osd":2,"uuid":"53adb85c-2242-4b5e-a3ed-dfb1b448b743","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6819","nonce":1494796243}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6821","nonce":1494796243}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6825","nonce":1494796243}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6823","nonce":1494796243}]},"public_addr":"192.168.123.104:6819/1494796243","cluster_addr":"192.168.123.104:6821/1494796243","heartbeat_back_addr":"192.168.123.104:6825/1494796243","heartbeat_front_addr":"192.168.123.104:6823/1494796243","state":["exists","up"]},{"osd":3,"uuid":"ac348a8b-4e4c-4ce9-84cd-4eafa34927bb","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6827","nonce":2755643936}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6829","nonce":2755643936}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6832","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6833","nonce":2755643936}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6831","nonce":2755643936}]},"public_addr":"192.168.123.104:6827/2755643936","cluster_addr":"192.168.123.104:6829/2755643936","heartbeat_back_addr":"192.168.123.104:6833/2755643936","heartbeat_front_addr":"192.168.123.104:6831/2755643936","state":["exists","up"]},{"osd":4,"uuid":"ffd541f9-68f9-454d-acfc-1323f62f60a0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","
addr":"192.168.123.110:6800","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6801","nonce":2012640669}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6802","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6803","nonce":2012640669}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6806","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6807","nonce":2012640669}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6804","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6805","nonce":2012640669}]},"public_addr":"192.168.123.110:6801/2012640669","cluster_addr":"192.168.123.110:6803/2012640669","heartbeat_back_addr":"192.168.123.110:6807/2012640669","heartbeat_front_addr":"192.168.123.110:6805/2012640669","state":["exists","up"]},{"osd":5,"uuid":"5efb3808-0928-47a5-97bc-ecad3a99a5e9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":32,"up_thru":33,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6808","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6809","nonce":614383691}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6810","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6811","nonce":614383691}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6814","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6815","nonce":614383691}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6812","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6813","nonce":614383691}]},"public_addr":"192.168.123.110:6809/614383691","cluster_addr":"192.168.123.110:6811/614383691","heartbeat_back_addr":"192.168.123.110:6815/614383691","heartbeat_front_addr":"192.168.123.110:6813/614383691","state":["exists","up"]},{"osd":6,"uuid":"e5c2cd5a-74db-44b2-8a4f-525ffaba40f9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":37,"up_thru":38,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6816","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6817","nonce":2965253276}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6818","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6819","nonce":2965253276}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6822","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6823","nonce":2965253276}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6820","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6821","nonce":2965253276}]},"public_addr":"192.168.123.110:6817/2965253276","cluster_addr":"192.168.123.110:6819/2965253276","heartbeat_back_addr":"192.168.123.110:6823/2965253276","heartbeat_front_addr":"192.168.123.110:6821/2965253276","state":["exists","up"]},{"osd":7,"uuid":"c2a6868a-a44a-4a09-a55c-d1145ef3d398","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":42,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6824","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6825","nonce":3776598559}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6826","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6827","nonce":3776598559}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6830","nonce":3776598559},{"type":
"v1","addr":"192.168.123.110:6831","nonce":3776598559}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6828","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6829","nonce":3776598559}]},"public_addr":"192.168.123.110:6825/3776598559","cluster_addr":"192.168.123.110:6827/3776598559","heartbeat_back_addr":"192.168.123.110:6831/3776598559","heartbeat_front_addr":"192.168.123.110:6829/3776598559","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:54:59.250466+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:07.947404+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:17.425660+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:27.200744+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:35.700171+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:45.216440+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:57.462852+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:56:08.399682+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.104:6801/2282925904":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/4258028872":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/3816463695":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/2988861187":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/1723664430":"2026-03-09T23:54:15.436245+0000","192.168.123.104:0/2663707440":"2026-03-09T23:54:15.436245+0000","192.168.123.104:0/1412084899":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6800/2282925904":"2026-03-09T23:54:25.574455+0000","192.168.123.104:6800/3942489037":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6801/3942489037":"2026-03-09T23:54:15.436245+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-08T23:56:11.039 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-08T23:55:19.841597+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 
'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '20', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-08T23:56:11.040 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd pool get .mgr pg_num 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: purged_snaps scrub starts 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: purged_snaps scrub ok 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: pgmap v83: 1 pgs: 1 active+recovering; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: osd.7 [v2:192.168.123.110:6824/3776598559,v1:192.168.123.110:6825/3776598559] boot 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: osdmap e42: 8 total, 8 up, 8 in 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/1810887184' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-08T23:56:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:10 vm10 ceph-mon[48982]: osdmap e43: 8 total, 8 up, 8 in 2026-03-08T23:56:11.210 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:11.571 INFO:teuthology.orchestra.run.vm04.stdout:pg_num: 1 2026-03-08T23:56:11.641 INFO:tasks.cephadm:Adding prometheus.a on vm10 2026-03-08T23:56:11.641 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch apply prometheus '1;vm10=a' 2026-03-08T23:56:11.849 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:11 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2415125481' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:11.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:11 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2061889256' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-08T23:56:11.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:11 vm04 ceph-mon[46823]: osdmap e44: 8 total, 8 up, 8 in 2026-03-08T23:56:11.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:11 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2415125481' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:11.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:11 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2061889256' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-08T23:56:11.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:11 vm04 ceph-mon[51053]: osdmap e44: 8 total, 8 up, 8 in 2026-03-08T23:56:11.921 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:11 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2415125481' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:11.921 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:11 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2061889256' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-08T23:56:11.921 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:11 vm10 ceph-mon[48982]: osdmap e44: 8 total, 8 up, 8 in 2026-03-08T23:56:12.150 INFO:teuthology.orchestra.run.vm10.stdout:Scheduled prometheus update... 2026-03-08T23:56:12.218 DEBUG:teuthology.orchestra.run.vm10:prometheus.a> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service 2026-03-08T23:56:12.220 INFO:tasks.cephadm:Adding node-exporter.a on vm04 2026-03-08T23:56:12.220 INFO:tasks.cephadm:Adding node-exporter.b on vm10 2026-03-08T23:56:12.220 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch apply node-exporter '2;vm04=a;vm10=b' 2026-03-08T23:56:12.722 INFO:teuthology.orchestra.run.vm10.stdout:Scheduled node-exporter update... 
2026-03-08T23:56:12.770 DEBUG:teuthology.orchestra.run.vm04:node-exporter.a> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service 2026-03-08T23:56:12.772 DEBUG:teuthology.orchestra.run.vm10:node-exporter.b> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service 2026-03-08T23:56:12.773 INFO:tasks.cephadm:Adding alertmanager.a on vm04 2026-03-08T23:56:12.774 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch apply alertmanager '1;vm04=a' 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: pgmap v86: 1 pgs: 1 remapped+peering; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-08T23:56:12.987 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:12 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: pgmap v86: 1 pgs: 1 remapped+peering; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 
ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: pgmap v86: 1 pgs: 1 remapped+peering; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-08T23:56:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:12 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' 2026-03-08T23:56:13.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ignoring --setuser ceph since I am not root 2026-03-08T23:56:13.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ignoring --setgroup ceph since I am not root 2026-03-08T23:56:13.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:13.342+0000 7f2d90774000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-08T23:56:13.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:13.398+0000 7f2d90774000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-08T23:56:13.599 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setuser ceph since I am not root 2026-03-08T23:56:13.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setgroup ceph since I am not root 2026-03-08T23:56:13.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:13.331+0000 7f1d549d6000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-08T23:56:13.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:13.388+0000 7f1d549d6000 -1 mgr[py] Module 
balancer has missing NOTIFY_TYPES member 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:13 vm10 ceph-mon[48982]: from='client.24284 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm10=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:13 vm10 ceph-mon[48982]: Saving service prometheus spec with placement vm10=a;count:1 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:13 vm10 ceph-mon[48982]: from='client.24290 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm04=a;vm10=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:13 vm10 ceph-mon[48982]: Saving service node-exporter spec with placement vm04=a;vm10=b;count:2 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:13 vm10 ceph-mon[48982]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:13 vm10 ceph-mon[48982]: mgrmap e16: y(active, since 107s), standbys: x 2026-03-08T23:56:14.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:13.839+0000 7f2d90774000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-08T23:56:14.099 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:13.789+0000 7f1d549d6000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[51053]: from='client.24284 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm10=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[51053]: Saving service prometheus spec with placement vm10=a;count:1 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[51053]: from='client.24290 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm04=a;vm10=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[51053]: Saving service node-exporter spec with placement vm04=a;vm10=b;count:2 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[51053]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[51053]: mgrmap e16: y(active, since 107s), standbys: x 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[46823]: from='client.24284 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm10=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[46823]: Saving service prometheus spec with placement vm10=a;count:1 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:13 
vm04 ceph-mon[46823]: from='client.24290 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm04=a;vm10=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[46823]: Saving service node-exporter spec with placement vm04=a;vm10=b;count:2 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[46823]: from='mgr.14152 192.168.123.104:0/2427583735' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-08T23:56:14.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:13 vm04 ceph-mon[46823]: mgrmap e16: y(active, since 107s), standbys: x 2026-03-08T23:56:14.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:14.216+0000 7f2d90774000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-08T23:56:14.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:14.370+0000 7f2d90774000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-08T23:56:14.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:14.429+0000 7f2d90774000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-08T23:56:14.579 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:14.163+0000 7f1d549d6000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-08T23:56:14.579 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:14.324+0000 7f1d549d6000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-08T23:56:14.579 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:14.385+0000 7f1d549d6000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-08T23:56:14.849 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:14.579+0000 7f1d549d6000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-08T23:56:15.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:14.616+0000 7f2d90774000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-08T23:56:15.497 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:15.230+0000 7f1d549d6000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-08T23:56:15.497 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:15.433+0000 7f1d549d6000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:56:15.539 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:15.263+0000 7f2d90774000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-08T23:56:15.539 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:15 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:15.473+0000 7f2d90774000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:56:15.827 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:15.539+0000 7f2d90774000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-08T23:56:15.827 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:15.602+0000 7f2d90774000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-08T23:56:15.827 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:15.674+0000 7f2d90774000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-08T23:56:15.827 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:15.746+0000 7f2d90774000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-08T23:56:15.849 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:15.498+0000 7f1d549d6000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-08T23:56:15.849 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:15.561+0000 7f1d549d6000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-08T23:56:15.849 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:15.631+0000 7f1d549d6000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-08T23:56:15.849 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:15.707+0000 7f1d549d6000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-08T23:56:16.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:16.057+0000 7f1d549d6000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-08T23:56:16.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:16.140+0000 7f1d549d6000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-08T23:56:16.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:16.080+0000 7f2d90774000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-08T23:56:16.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:16.155+0000 7f2d90774000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-08T23:56:17.047 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:16.770+0000 7f2d90774000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-08T23:56:17.047 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:16.838+0000 7f2d90774000 -1 mgr[py] Module selftest has missing 
NOTIFY_TYPES member 2026-03-08T23:56:17.047 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:16.913+0000 7f2d90774000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-08T23:56:17.055 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:16.777+0000 7f1d549d6000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-08T23:56:17.055 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:16.847+0000 7f1d549d6000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-08T23:56:17.055 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:16.919+0000 7f1d549d6000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-08T23:56:17.324 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.047+0000 7f2d90774000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-08T23:56:17.324 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.114+0000 7f2d90774000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-08T23:56:17.324 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.223+0000 7f2d90774000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-08T23:56:17.335 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:17.056+0000 7f1d549d6000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-08T23:56:17.335 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:17.125+0000 7f1d549d6000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-08T23:56:17.335 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:17.235+0000 7f1d549d6000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-08T23:56:17.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.324+0000 7f2d90774000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:56:17.599 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:17.336+0000 7f1d549d6000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-08T23:56:17.947 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: Standby manager daemon x restarted 2026-03-08T23:56:17.947 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: Standby manager daemon x started 2026-03-08T23:56:17.947 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-08T23:56:17.947 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: from='mgr.? 
192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:56:17.947 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: Active manager daemon y restarted 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: Activating manager daemon y 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:17 vm10 ceph-mon[48982]: osdmap e45: 8 total, 8 up, 8 in 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.688+0000 7f2d90774000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.748+0000 7f2d90774000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: [08/Mar/2026:23:56:17] ENGINE Bus STARTING 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: CherryPy Checker: 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: The Application mounted at '' has an empty config. 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: [08/Mar/2026:23:56:17] ENGINE Serving on http://:::9283 2026-03-08T23:56:17.948 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: [08/Mar/2026:23:56:17] ENGINE Bus STARTED 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:17.698+0000 7f1d549d6000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:17.761+0000 7f1d549d6000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:56:17] ENGINE Bus STARTING 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: Standby manager daemon x restarted 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: from='mgr.? 
192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:56:17.951 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: Active manager daemon y restarted 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: Activating manager daemon y 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[51053]: osdmap e45: 8 total, 8 up, 8 in 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: Standby manager daemon x restarted 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: Standby manager daemon x started 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/3243371168' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: Active manager daemon y restarted 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: Activating manager daemon y 2026-03-08T23:56:17.952 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:17 vm04 ceph-mon[46823]: osdmap e45: 8 total, 8 up, 8 in 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: CherryPy Checker: 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: The Application mounted at '' has an empty config. 
2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:56:18] ENGINE Serving on http://:::9283 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:56:18] ENGINE Bus STARTED 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:56:18] ENGINE Bus STARTING 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:56:18] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:56:18.274 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [08/Mar/2026:23:56:18] ENGINE Bus STARTED 2026-03-08T23:56:18.865 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: mgrmap e17: y(active, starting, since 0.0469966s), standbys: x 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 
192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: Manager daemon y is now available 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: [08/Mar/2026:23:56:18] ENGINE Bus STARTING 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 ' 
entity='mgr.y' 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.866 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.866 INFO:teuthology.orchestra.run.vm10.stdout:Scheduled alertmanager update... 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: mgrmap e17: y(active, starting, since 0.0469966s), standbys: x 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 
2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: Manager daemon y is now available 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:18.943 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: [08/Mar/2026:23:56:18] ENGINE Bus STARTING 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: mgrmap e17: y(active, starting, since 0.0469966s), standbys: x 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 
23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: Manager daemon y is now available 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' 
entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: [08/Mar/2026:23:56:18] ENGINE Bus STARTING 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.944 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:18.948 DEBUG:teuthology.orchestra.run.vm04:alertmanager.a> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@alertmanager.a.service 2026-03-08T23:56:18.951 INFO:tasks.cephadm:Adding grafana.a on vm10 2026-03-08T23:56:18.951 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph orch apply grafana '1;vm10=a' 2026-03-08T23:56:19.538 INFO:teuthology.orchestra.run.vm10.stdout:Scheduled grafana update... 2026-03-08T23:56:19.602 DEBUG:teuthology.orchestra.run.vm10:grafana.a> sudo journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service 2026-03-08T23:56:19.603 INFO:tasks.cephadm:Setting up client nodes... 
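The 'ceph orch apply grafana 1;vm10=a' call above uses cephadm's shorthand placement string: a daemon count followed by host=daemon-id pairs, which the mgr records as "placement vm10=a;count:1" (and "vm04=a;count:1" for alertmanager a few lines earlier). A minimal sketch of issuing the same kind of apply outside teuthology, reusing the cephadm shell wrapper, image, and fsid exactly as they appear in this run; the orch_apply helper and the idea of driving it from Python are illustrative only, not part of the test harness:

import subprocess

FSID = "fdcbddf6-1b49-11f1-80b0-7392062373f9"   # fsid of this test cluster
IMAGE = "quay.io/ceph/ceph:v17.2.0"             # starting image used by this job

def orch_apply(service, placement):
    # placement shorthand: "<count>;<host>=<daemon-id>", e.g. "1;vm10=a"
    # schedules one daemon of <service>, named <service>.a, on host vm10.
    cmd = (
        "sudo cephadm --image {img} shell "
        "-c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring "
        "--fsid {fsid} -- ceph orch apply {svc} '{plc}'"
    ).format(img=IMAGE, fsid=FSID, svc=service, plc=placement)
    subprocess.run(cmd, shell=True, check=True)

# orch_apply("grafana", "1;vm10=a")   # mirrors the call made by tasks.cephadm above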
2026-03-08T23:56:19.603 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: [08/Mar/2026:23:56:18] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: [08/Mar/2026:23:56:18] ENGINE Bus STARTED 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: mgrmap e18: y(active, since 1.06526s), standbys: x 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='client.14397 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm04=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: Saving service alertmanager spec with placement vm04=a;count:1 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 
2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: Adjusting osd_memory_target on vm10 to 65804k 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: Unable to set osd_memory_target on vm10 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.conf 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: [08/Mar/2026:23:56:18] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: [08/Mar/2026:23:56:18] ENGINE Bus STARTED 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: mgrmap e18: y(active, since 1.06526s), standbys: x 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='client.14397 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm04=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: Saving service alertmanager spec with placement vm04=a;count:1 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", 
"who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: Adjusting osd_memory_target on vm10 to 65804k 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: Unable to set osd_memory_target on vm10 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: [08/Mar/2026:23:56:18] ENGINE Serving on https://192.168.123.104:7150 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: [08/Mar/2026:23:56:18] ENGINE Bus STARTED 
2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: mgrmap e18: y(active, since 1.06526s), standbys: x 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='client.14397 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm04=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: Saving service alertmanager spec with placement vm04=a;count:1 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: Adjusting osd_memory_target on vm10 to 65804k 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: Unable to set osd_memory_target on vm10 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.conf 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 
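The "Adjusting osd_memory_target on vm10 to 65804k" / "Unable to set osd_memory_target ... below minimum 939524096" pair repeated above is cephadm's memory autotuner at work: with mgr/cephadm autotune_memory_target_ratio at its default of 0.7 (visible in the module options dumped further down), the small VPS hosts in this job yield a per-OSD target of roughly 64 MiB, far under the 896 MiB floor that osd_memory_target enforces, so the mon rejects the set; the surrounding "config rm ... osd_memory_target" entries appear to be the same pass clearing per-daemon overrides. A small worked check of the two numbers taken from the log (illustration only, not cephadm's exact accounting):

MIN_OSD_MEMORY_TARGET = 939_524_096     # hard minimum reported by the mon (896 MiB)
attempted = 67_384_115                  # value the autotuner tried to set on vm10

mib = lambda b: b / (1024 * 1024)
print("attempted: %.1f MiB" % mib(attempted))              # ~64.3 MiB, the "65804k" above
print("minimum:   %.0f MiB" % mib(MIN_OSD_MEMORY_TARGET))  # 896 MiB
assert attempted < MIN_OSD_MEMORY_TARGET   # hence "below minimum" and the set is refused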
2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:20.434 INFO:teuthology.orchestra.run.vm04.stdout:[client.0] 2026-03-08T23:56:20.434 INFO:teuthology.orchestra.run.vm04.stdout: key = AQCkDK5pzt5XGRAAJAGjpK/MQmyPK4c5sUkkWw== 2026-03-08T23:56:20.434 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:20 vm04 systemd[1]: Starting Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:56:20.520 DEBUG:teuthology.orchestra.run.vm04:> set -ex 2026-03-08T23:56:20.520 DEBUG:teuthology.orchestra.run.vm04:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-08T23:56:20.520 DEBUG:teuthology.orchestra.run.vm04:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-08T23:56:20.567 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-08T23:56:20.846 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:20 vm04 bash[65268]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: from='client.14427 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm10=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: Saving service grafana spec with placement vm10=a;count:1 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: Deploying daemon node-exporter.a on vm04 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/2966623853' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2966623853' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[51053]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: from='client.14427 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm10=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: Saving service grafana spec with placement vm10=a;count:1 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: Deploying daemon node-exporter.a on vm04 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2966623853' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2966623853' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:56:21.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:20 vm04 ceph-mon[46823]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: from='client.14427 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm10=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: Saving service grafana spec with placement vm10=a;count:1 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: Deploying daemon node-exporter.a on vm04 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2966623853' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2966623853' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:56:21.129 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:20 vm10 ceph-mon[48982]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-08T23:56:21.129 INFO:teuthology.orchestra.run.vm10.stdout:[client.1] 2026-03-08T23:56:21.129 INFO:teuthology.orchestra.run.vm10.stdout: key = AQClDK5pXY9bBxAAu1wZ9a6suGehtdzqN6TAYA== 2026-03-08T23:56:21.204 DEBUG:teuthology.orchestra.run.vm10:> set -ex 2026-03-08T23:56:21.204 DEBUG:teuthology.orchestra.run.vm10:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-08T23:56:21.204 DEBUG:teuthology.orchestra.run.vm10:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-08T23:56:21.244 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 
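From here the task waits for an active mgr: the next command runs 'ceph mgr dump --format=json' through the same cephadm shell wrapper, and the dump that follows shows mgr.y active with "available":true and x on standby. A minimal sketch of that readiness poll in Python, reusing the fsid and image from this run; wait_for_mgr_available is a hypothetical helper for illustration, not teuthology's own code:

import json
import subprocess
import time

FSID = "fdcbddf6-1b49-11f1-80b0-7392062373f9"   # fsid from this run; use your own
IMAGE = "quay.io/ceph/ceph:v17.2.0"

def mgr_dump():
    # Run `ceph mgr dump --format=json` inside a cephadm shell and parse the output.
    out = subprocess.run(
        ["sudo", "cephadm", "--image", IMAGE, "shell", "--fsid", FSID, "--",
         "ceph", "mgr", "dump", "--format=json"],
        check=True, capture_output=True, text=True,
    ).stdout
    return json.loads(out)

def wait_for_mgr_available(timeout=300, interval=5):
    # Poll the mgr map until it reports an available active mgr.
    deadline = time.time() + timeout
    while time.time() < deadline:
        dump = mgr_dump()
        if dump.get("available"):
            return dump            # in this run: active_name "y", standby "x"
        time.sleep(interval)
    raise TimeoutError("no active mgr became available")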
2026-03-08T23:56:21.244 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-08T23:56:21.244 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph mgr dump --format=json 2026-03-08T23:56:21.414 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:21.828 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:21.829 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:21 vm04 bash[65268]: Getting image source signatures 2026-03-08T23:56:21.830 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:21 vm04 bash[65268]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-08T23:56:21.830 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:21 vm04 bash[65268]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-08T23:56:21.830 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:21 vm04 bash[65268]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-08T23:56:21.887 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":19,"active_gid":24298,"active_name":"y","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6800","nonce":46665440},{"type":"v1","addr":"192.168.123.104:6801","nonce":46665440}]},"active_addr":"192.168.123.104:6801/46665440","active_change":"2026-03-08T23:56:17.765473+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":24302,"name":"x","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP 
server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph 
containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container 
image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per 
host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.104:8443/","prometheus":"http://192.168.123.104:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":45,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":961449681}]},{"addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":3601859199}]},{"addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":3385791586}]},{"addrvec":[{"type":"v2","addr":"192.168.123.104:0","nonce":2938686256}]}]}} 2026-03-08T23:56:21.888 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-08T23:56:21.888 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-08T23:56:21.888 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd dump --format=json 2026-03-08T23:56:22.071 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[51053]: from='client.? 192.168.123.110:0/663606932' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:56:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1202951165' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-08T23:56:22.106 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[46823]: from='client.? 192.168.123.110:0/663606932' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:22.106 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:22.106 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[46823]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:56:22.106 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:21 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1202951165' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-08T23:56:22.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:21 vm10 ceph-mon[48982]: from='client.? 192.168.123.110:0/663606932' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:22.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:21 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-08T23:56:22.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:21 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-08T23:56:22.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:21 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1202951165' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-08T23:56:22.540 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:22.540 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":45,"fsid":"fdcbddf6-1b49-11f1-80b0-7392062373f9","created":"2026-03-08T23:53:59.990761+0000","modified":"2026-03-08T23:56:17.763435+0000","last_up_change":"2026-03-08T23:56:09.724834+0000","last_in_change":"2026-03-08T23:55:59.871620+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-08T23:55:19.841597+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"",
"hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"d3ce7c0b-7841-417d-8412-02f631c2946d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6803","nonce":959618434}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6805","nonce":959618434}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6809","nonce":959618434}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6807","nonce":959618434}]},"public_addr":"192.168.123.104:6803/959618434","cluster_addr":"192.168.123.104:6805/959618434","heartbeat_back_addr":"192.168.123.104:6809/959618434","heartbeat_front_addr":"192.168.123.104:6807/959618434","state":["exists","up"]},{"osd":1,"uuid":"75d29058-61cd-44da-9ebc-7516b509075d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6811","nonce":1772469673}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6813","nonce":1772469673}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6817","nonce":1772469673}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6815","nonce":1772469673}]},"public_addr":"192.168.123.104:6811/1772469673","cluster_addr":"192.168.123.104:6813/1772469673","heartbeat_back_addr":"192.168.123.104:6817/1772469673","heartbeat_front_addr":"192.168.123.104:6815/1772469673","state":["exists","up"]},{"osd":2,"uuid":"53adb85c-2242-4b5e-a3ed-dfb1b448b743","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6819","nonce":1494796243}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6821","nonce":1494796243}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6825","nonce":1494796243}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6823","nonce":1494796243}]},"public_addr":"192.168.123.104:6819/1494796243","cluster_addr":"192.168.123.104:6821/1494796243","heartbeat_back_addr":"192.168.123.104:6825/1494796243","heartbeat_front_addr":"192.168.123.104:6823/1494796243","state":["exists","up"]}
,{"osd":3,"uuid":"ac348a8b-4e4c-4ce9-84cd-4eafa34927bb","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6827","nonce":2755643936}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6829","nonce":2755643936}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6832","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6833","nonce":2755643936}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6831","nonce":2755643936}]},"public_addr":"192.168.123.104:6827/2755643936","cluster_addr":"192.168.123.104:6829/2755643936","heartbeat_back_addr":"192.168.123.104:6833/2755643936","heartbeat_front_addr":"192.168.123.104:6831/2755643936","state":["exists","up"]},{"osd":4,"uuid":"ffd541f9-68f9-454d-acfc-1323f62f60a0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6800","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6801","nonce":2012640669}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6802","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6803","nonce":2012640669}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6806","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6807","nonce":2012640669}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6804","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6805","nonce":2012640669}]},"public_addr":"192.168.123.110:6801/2012640669","cluster_addr":"192.168.123.110:6803/2012640669","heartbeat_back_addr":"192.168.123.110:6807/2012640669","heartbeat_front_addr":"192.168.123.110:6805/2012640669","state":["exists","up"]},{"osd":5,"uuid":"5efb3808-0928-47a5-97bc-ecad3a99a5e9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":32,"up_thru":33,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6808","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6809","nonce":614383691}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6810","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6811","nonce":614383691}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6814","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6815","nonce":614383691}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6812","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6813","nonce":614383691}]},"public_addr":"192.168.123.110:6809/614383691","cluster_addr":"192.168.123.110:6811/614383691","heartbeat_back_addr":"192.168.123.110:6815/614383691","heartbeat_front_addr":"192.168.123.110:6813/614383691","state":["exists","up"]},{"osd":6,"uuid":"e5c2cd5a-74db-44b2-8a4f-525ffaba40f9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":37,"up_thru":38,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6816","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6817","nonce":2965253276}]},"cluster_addrs":{"addrvec":[{"
type":"v2","addr":"192.168.123.110:6818","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6819","nonce":2965253276}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6822","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6823","nonce":2965253276}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6820","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6821","nonce":2965253276}]},"public_addr":"192.168.123.110:6817/2965253276","cluster_addr":"192.168.123.110:6819/2965253276","heartbeat_back_addr":"192.168.123.110:6823/2965253276","heartbeat_front_addr":"192.168.123.110:6821/2965253276","state":["exists","up"]},{"osd":7,"uuid":"c2a6868a-a44a-4a09-a55c-d1145ef3d398","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":42,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6824","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6825","nonce":3776598559}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6826","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6827","nonce":3776598559}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6830","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6831","nonce":3776598559}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6828","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6829","nonce":3776598559}]},"public_addr":"192.168.123.110:6825/3776598559","cluster_addr":"192.168.123.110:6827/3776598559","heartbeat_back_addr":"192.168.123.110:6831/3776598559","heartbeat_front_addr":"192.168.123.110:6829/3776598559","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:54:59.250466+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:07.947404+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:17.425660+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:27.200744+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:35.700171+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:45.216440+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:57.462852+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:56:08.399682+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.104:0/392608918":"2026-03-09T23:56:17.763396+0000","192.168.123.104:6800/1056094101":"2026-03-09T23:56:17.763396+0000","192.168.1
23.104:0/2797711501":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/285198153":"2026-03-09T23:56:17.763396+0000","192.168.123.104:6801/2282925904":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/1020657381":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/4258028872":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/3816463695":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/2988861187":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/1723664430":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6801/1056094101":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/2663707440":"2026-03-09T23:54:15.436245+0000","192.168.123.104:0/1412084899":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6800/2282925904":"2026-03-09T23:54:25.574455+0000","192.168.123.104:6800/3942489037":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6801/3942489037":"2026-03-09T23:54:15.436245+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-08T23:56:22.604 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-08T23:56:22.604 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd dump --format=json 2026-03-08T23:56:22.700 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 bash[65268]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-08T23:56:22.700 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 bash[65268]: Writing manifest to image destination 2026-03-08T23:56:22.800 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:22.962 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:22 vm04 ceph-mon[51053]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:22.962 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:22 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3382896795' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:22.962 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:22 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 podman[65268]: 2026-03-08 23:56:22.71469901 +0000 UTC m=+2.277119496 container create ff6d0adb33b76901daedc9dc18647e2a27f7e667d5ba9c47e9219040edd50abe (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 podman[65268]: 2026-03-08 23:56:22.746639794 +0000 UTC m=+2.309060290 container init ff6d0adb33b76901daedc9dc18647e2a27f7e667d5ba9c47e9219040edd50abe (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 podman[65268]: 2026-03-08 23:56:22.750968569 +0000 UTC m=+2.313389055 container start ff6d0adb33b76901daedc9dc18647e2a27f7e667d5ba9c47e9219040edd50abe (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 bash[65268]: ff6d0adb33b76901daedc9dc18647e2a27f7e667d5ba9c47e9219040edd50abe 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 podman[65268]: 2026-03-08 23:56:22.708337402 +0000 UTC m=+2.270757899 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 systemd[1]: Started Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
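[editor's note] The ceph_manager step above ("waiting for all up", repeated `ceph osd dump --format=json` calls, then "all up!") amounts to polling the OSDMap until every OSD reports both up and in. A minimal sketch of that check, assuming a hypothetical helper rather than the actual teuthology code, could look like:

    import json

    def all_osds_up_and_in(osd_dump_output: str) -> bool:
        # osd_dump_output is the JSON printed by `ceph osd dump --format=json`,
        # as captured in the log above; each entry in "osds" carries "up" and "in" flags.
        osd_map = json.loads(osd_dump_output)
        osds = osd_map["osds"]
        # Hypothetical criterion: every listed OSD must be both up and in
        # (the dump above shows 8 OSDs, all with "up":1 and "in":1).
        return all(o["up"] == 1 and o["in"] == 1 for o in osds)

Once this predicate holds, the harness logs "all up!" and proceeds; the exact condition used by teuthology may differ.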
2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.760Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.760Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.760Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=arp 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-08T23:56:22.965 
INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=edac 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=os 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-08T23:56:22.965 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=stat 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 
level=info collector=textfile 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=time 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=uname 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-08T23:56:22.966 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[65626]: ts=2026-03-08T23:56:22.761Z caller=tls_config.go:195 level=info msg="TLS is disabled." http2=false 2026-03-08T23:56:22.966 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-mon[46823]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:22.966 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3382896795' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:22.966 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:22 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:23.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:22 vm10 ceph-mon[48982]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:23.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:22 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3382896795' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:23.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:22 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:23.157 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:23.157 INFO:teuthology.orchestra.run.vm04.stdout:{"epoch":45,"fsid":"fdcbddf6-1b49-11f1-80b0-7392062373f9","created":"2026-03-08T23:53:59.990761+0000","modified":"2026-03-08T23:56:17.763435+0000","last_up_change":"2026-03-08T23:56:09.724834+0000","last_in_change":"2026-03-08T23:55:59.871620+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-08T23:55:19.841597+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"d3ce7c0b-7841-417d-8412-02f631c2946d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":42,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6802","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6803","nonce":959618434}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6804","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6805","nonce":959618434}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6808","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6809","nonce":959618434}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6806","nonce":959618434},{"type":"v1","addr":"192.168.123.104:6807","nonce":959618434}]},"public_addr":"192.168.123.104:6803/959618434","cluster_addr":"192.168.123.104:6805/959618434","
heartbeat_back_addr":"192.168.123.104:6809/959618434","heartbeat_front_addr":"192.168.123.104:6807/959618434","state":["exists","up"]},{"osd":1,"uuid":"75d29058-61cd-44da-9ebc-7516b509075d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":12,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6810","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6811","nonce":1772469673}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6812","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6813","nonce":1772469673}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6816","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6817","nonce":1772469673}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6814","nonce":1772469673},{"type":"v1","addr":"192.168.123.104:6815","nonce":1772469673}]},"public_addr":"192.168.123.104:6811/1772469673","cluster_addr":"192.168.123.104:6813/1772469673","heartbeat_back_addr":"192.168.123.104:6817/1772469673","heartbeat_front_addr":"192.168.123.104:6815/1772469673","state":["exists","up"]},{"osd":2,"uuid":"53adb85c-2242-4b5e-a3ed-dfb1b448b743","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6818","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6819","nonce":1494796243}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6820","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6821","nonce":1494796243}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6824","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6825","nonce":1494796243}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6822","nonce":1494796243},{"type":"v1","addr":"192.168.123.104:6823","nonce":1494796243}]},"public_addr":"192.168.123.104:6819/1494796243","cluster_addr":"192.168.123.104:6821/1494796243","heartbeat_back_addr":"192.168.123.104:6825/1494796243","heartbeat_front_addr":"192.168.123.104:6823/1494796243","state":["exists","up"]},{"osd":3,"uuid":"ac348a8b-4e4c-4ce9-84cd-4eafa34927bb","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6826","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6827","nonce":2755643936}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6828","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6829","nonce":2755643936}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6832","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6833","nonce":2755643936}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.104:6830","nonce":2755643936},{"type":"v1","addr":"192.168.123.104:6831","nonce":2755643936}]},"public_addr":"192.168.123.104:6827/2755643936","cluster_addr":"192.168.123.104:6829/2755643936","heartbeat_back_addr":"192.168.123.104:6833/2755643936","heartbeat_front_addr":"192.168.123.104:6831/2755643936","state":["exists","up"]},{"osd":4,"uuid":"ffd541f9-68f9-454d-acfc-1323f62f60a0","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2",
"addr":"192.168.123.110:6800","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6801","nonce":2012640669}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6802","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6803","nonce":2012640669}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6806","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6807","nonce":2012640669}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6804","nonce":2012640669},{"type":"v1","addr":"192.168.123.110:6805","nonce":2012640669}]},"public_addr":"192.168.123.110:6801/2012640669","cluster_addr":"192.168.123.110:6803/2012640669","heartbeat_back_addr":"192.168.123.110:6807/2012640669","heartbeat_front_addr":"192.168.123.110:6805/2012640669","state":["exists","up"]},{"osd":5,"uuid":"5efb3808-0928-47a5-97bc-ecad3a99a5e9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":32,"up_thru":33,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6808","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6809","nonce":614383691}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6810","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6811","nonce":614383691}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6814","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6815","nonce":614383691}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6812","nonce":614383691},{"type":"v1","addr":"192.168.123.110:6813","nonce":614383691}]},"public_addr":"192.168.123.110:6809/614383691","cluster_addr":"192.168.123.110:6811/614383691","heartbeat_back_addr":"192.168.123.110:6815/614383691","heartbeat_front_addr":"192.168.123.110:6813/614383691","state":["exists","up"]},{"osd":6,"uuid":"e5c2cd5a-74db-44b2-8a4f-525ffaba40f9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":37,"up_thru":38,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6816","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6817","nonce":2965253276}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6818","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6819","nonce":2965253276}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6822","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6823","nonce":2965253276}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6820","nonce":2965253276},{"type":"v1","addr":"192.168.123.110:6821","nonce":2965253276}]},"public_addr":"192.168.123.110:6817/2965253276","cluster_addr":"192.168.123.110:6819/2965253276","heartbeat_back_addr":"192.168.123.110:6823/2965253276","heartbeat_front_addr":"192.168.123.110:6821/2965253276","state":["exists","up"]},{"osd":7,"uuid":"c2a6868a-a44a-4a09-a55c-d1145ef3d398","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":42,"up_thru":43,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6824","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6825","nonce":3776598559}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6826","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6827","nonce":3776598559}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6830","nonce":3776598559},{"type
":"v1","addr":"192.168.123.110:6831","nonce":3776598559}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.110:6828","nonce":3776598559},{"type":"v1","addr":"192.168.123.110:6829","nonce":3776598559}]},"public_addr":"192.168.123.110:6825/3776598559","cluster_addr":"192.168.123.110:6827/3776598559","heartbeat_back_addr":"192.168.123.110:6831/3776598559","heartbeat_front_addr":"192.168.123.110:6829/3776598559","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:54:59.250466+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:07.947404+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:17.425660+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:27.200744+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:35.700171+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:45.216440+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:55:57.462852+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-08T23:56:08.399682+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.104:0/392608918":"2026-03-09T23:56:17.763396+0000","192.168.123.104:6800/1056094101":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/2797711501":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/285198153":"2026-03-09T23:56:17.763396+0000","192.168.123.104:6801/2282925904":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/1020657381":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/4258028872":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/3816463695":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/2988861187":"2026-03-09T23:54:25.574455+0000","192.168.123.104:0/1723664430":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6801/1056094101":"2026-03-09T23:56:17.763396+0000","192.168.123.104:0/2663707440":"2026-03-09T23:54:15.436245+0000","192.168.123.104:0/1412084899":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6800/2282925904":"2026-03-09T23:54:25.574455+0000","192.168.123.104:6800/3942489037":"2026-03-09T23:54:15.436245+0000","192.168.123.104:6801/3942489037":"2026-03-09T23:54:15.436245+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_
mode_bucket":0}} 2026-03-08T23:56:23.207 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.0 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.1 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.2 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.3 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.4 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.5 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.6 flush_pg_stats 2026-03-08T23:56:23.208 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph tell osd.7 flush_pg_stats 2026-03-08T23:56:23.359 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:23 vm10 systemd[1]: Starting Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:56:23.827 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:23 vm10 bash[63412]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 2026-03-08T23:56:23.843 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:23.935 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:23.945 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:23.951 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:23.953 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:23.963 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:23.980 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:24.115 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:23 vm04 ceph-mon[46823]: Deploying daemon node-exporter.b on vm10 2026-03-08T23:56:24.116 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:23 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2397216710' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:24.116 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:23 vm04 ceph-mon[51053]: Deploying daemon node-exporter.b on vm10 2026-03-08T23:56:24.116 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:23 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2397216710' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:24.140 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:24.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:23 vm10 ceph-mon[48982]: Deploying daemon node-exporter.b on vm10 2026-03-08T23:56:24.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:23 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2397216710' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-08T23:56:24.889 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:24 vm04 ceph-mon[46823]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:24.889 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:24 vm04 ceph-mon[51053]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:25.016 INFO:teuthology.orchestra.run.vm04.stdout:73014444046 2026-03-08T23:56:25.016 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.2 2026-03-08T23:56:25.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:24 vm10 ceph-mon[48982]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:25.077 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:24 vm10 bash[63412]: Getting image source signatures 2026-03-08T23:56:25.077 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:24 vm10 bash[63412]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-08T23:56:25.077 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:24 vm10 bash[63412]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-08T23:56:25.077 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:24 vm10 bash[63412]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-08T23:56:25.177 INFO:teuthology.orchestra.run.vm04.stdout:51539607568 2026-03-08T23:56:25.177 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.1 2026-03-08T23:56:25.656 INFO:teuthology.orchestra.run.vm04.stdout:137438953481 2026-03-08T23:56:25.656 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.5 2026-03-08T23:56:25.676 INFO:teuthology.orchestra.run.vm04.stdout:98784247820 2026-03-08T23:56:25.676 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.3 2026-03-08T23:56:25.817 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config 
/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:25.828 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 bash[63412]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 bash[63412]: Writing manifest to image destination 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 podman[63412]: 2026-03-08 23:56:25.493480628 +0000 UTC m=+2.045390325 container create c055c663d1e800ade894d290e0fac67be676413cf267a40693df5ff526971e1b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 podman[63412]: 2026-03-08 23:56:25.520625468 +0000 UTC m=+2.072535175 container init c055c663d1e800ade894d290e0fac67be676413cf267a40693df5ff526971e1b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 podman[63412]: 2026-03-08 23:56:25.524573867 +0000 UTC m=+2.076483564 container start c055c663d1e800ade894d290e0fac67be676413cf267a40693df5ff526971e1b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 bash[63412]: c055c663d1e800ade894d290e0fac67be676413cf267a40693df5ff526971e1b 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 podman[63412]: 2026-03-08 23:56:25.482647917 +0000 UTC m=+2.034557623 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 systemd[1]: Started Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.532Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.532Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=arp 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-08T23:56:25.829 
INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=edac 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=os 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=stat 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 
level=info collector=textfile 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=time 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=uname 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-08T23:56:25.829 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-08T23:56:25.830 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-08T23:56:25.830 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-08T23:56:25.830 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 08 23:56:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[63467]: ts=2026-03-08T23:56:25.533Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-08T23:56:25.830 INFO:teuthology.orchestra.run.vm04.stdout:180388626436 2026-03-08T23:56:25.831 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.7 2026-03-08T23:56:25.843 INFO:teuthology.orchestra.run.vm04.stdout:34359738386 2026-03-08T23:56:25.843 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.0 2026-03-08T23:56:25.850 INFO:teuthology.orchestra.run.vm04.stdout:115964117003 2026-03-08T23:56:25.851 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.4 2026-03-08T23:56:25.905 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:25.920 INFO:teuthology.orchestra.run.vm04.stdout:158913789958 2026-03-08T23:56:25.920 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph osd last-stat-seq osd.6 2026-03-08T23:56:26.671 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:26.692 INFO:teuthology.orchestra.run.vm04.stdout:73014444046 2026-03-08T23:56:26.763 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:26.785 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:26 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:26.785 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:26 vm04 ceph-mon[51053]: Deploying daemon prometheus.a on vm10 2026-03-08T23:56:26.785 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:26 vm04 ceph-mon[51053]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:26.785 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:26 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:26.785 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:26 vm04 ceph-mon[46823]: Deploying daemon prometheus.a on vm10 2026-03-08T23:56:26.785 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:26 vm04 ceph-mon[46823]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:26.817 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:26.841 INFO:teuthology.orchestra.run.vm04.stdout:51539607568 2026-03-08T23:56:26.945 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444046 got 73014444046 for osd.2 2026-03-08T23:56:26.945 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:26.954 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:26.975 INFO:tasks.cephadm.ceph_manager.ceph:need seq 51539607568 got 51539607568 for osd.1 2026-03-08T23:56:26.975 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:27.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:26 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:27.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:26 vm10 
ceph-mon[48982]: Deploying daemon prometheus.a on vm10 2026-03-08T23:56:27.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:26 vm10 ceph-mon[48982]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:27.146 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:27.146 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:27.821 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:27 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1150980824' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-08T23:56:27.821 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:27 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3759583685' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-08T23:56:27.821 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:27 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1150980824' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-08T23:56:27.821 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:27 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3759583685' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-08T23:56:27.889 INFO:teuthology.orchestra.run.vm04.stdout:137438953481 2026-03-08T23:56:27.973 INFO:teuthology.orchestra.run.vm04.stdout:158913789958 2026-03-08T23:56:28.044 INFO:tasks.cephadm.ceph_manager.ceph:need seq 158913789958 got 158913789958 for osd.6 2026-03-08T23:56:28.044 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:28.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:27 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1150980824' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-08T23:56:28.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:27 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3759583685' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-08T23:56:28.145 INFO:tasks.cephadm.ceph_manager.ceph:need seq 137438953481 got 137438953481 for osd.5 2026-03-08T23:56:28.145 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:28.254 INFO:teuthology.orchestra.run.vm04.stdout:34359738386 2026-03-08T23:56:28.276 INFO:teuthology.orchestra.run.vm04.stdout:98784247820 2026-03-08T23:56:28.382 INFO:teuthology.orchestra.run.vm04.stdout:180388626436 2026-03-08T23:56:28.429 INFO:teuthology.orchestra.run.vm04.stdout:115964117003 2026-03-08T23:56:28.431 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247820 got 98784247820 for osd.3 2026-03-08T23:56:28.431 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:28.442 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738386 got 34359738386 for osd.0 2026-03-08T23:56:28.442 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:28.494 INFO:tasks.cephadm.ceph_manager.ceph:need seq 180388626436 got 180388626436 for osd.7 2026-03-08T23:56:28.495 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:28.526 INFO:tasks.cephadm.ceph_manager.ceph:need seq 115964117003 got 115964117003 for osd.4 2026-03-08T23:56:28.527 DEBUG:teuthology.parallel:result is None 2026-03-08T23:56:28.527 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-08T23:56:28.527 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph pg dump --format=json 2026-03-08T23:56:28.743 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:29.124 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:29.126 INFO:teuthology.orchestra.run.vm04.stderr:dumped all 2026-03-08T23:56:29.126 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:29.126 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:29.126 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3488909038' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4082841601' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1689734364' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3273542334' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3034054334' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/4016021000' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3488909038' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4082841601' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1689734364' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3273542334' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3034054334' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-08T23:56:29.127 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4016021000' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-08T23:56:29.189 
INFO:teuthology.orchestra.run.vm04.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-08T23:56:27.821517+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48612,"kb_used_data":4516,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690780,"statfs":{"total":171765137408,"available":171715358720,"internally_reserved":0,"allocated":4624384,"data_stored":2602733,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.971615"},"pg_stats":[{"pgid":"1.0","version":"45'87","reported_seq":56,"reported_epoch":45,"state":"active+clean","last_fresh":"2026-03-08T23:56:17.893191+0000","last_change":"2026-03-08T23:56:12.063955+0000","last_active":"2026-03
-08T23:56:17.893191+0000","last_peered":"2026-03-08T23:56:17.893191+0000","last_clean":"2026-03-08T23:56:17.893191+0000","last_became_active":"2026-03-08T23:56:11.748820+0000","last_became_peered":"2026-03-08T23:56:11.748820+0000","last_unstale":"2026-03-08T23:56:17.893191+0000","last_undegraded":"2026-03-08T23:56:17.893191+0000","last_fullsized":"2026-03-08T23:56:17.893191+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":44,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:55:20.090222+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:55:20.090222+0000","last_clean_scrub_stamp":"2026-03-08T23:55:20.090222+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T03:19:53.549043+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":42,"seq":180388626436,"num_pgs":1,"num_osds":1,"num_per_po
ol_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6068,"kb_used_data":812,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961356,"statfs":{"total":21470642176,"available":21464428544,"internally_reserved":0,"allocated":831488,"data_stored":574163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80100000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66400000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76300000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65700000000000003}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72199999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64100000000000001}]}]},{"osd":6,"up_from":37,"seq":158913789958,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6128,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961296,"statfs":{"total":21470642176,"available":21464367104,"internally_reserved":0,"allocated":827392,"data_stored":573812,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45400000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57399999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68799999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.435}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.499}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58499999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.99399999999999999}]}]},{"osd":1,"up_from":12,"seq":51539607568,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6240,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961184,"statfs":{"total":21470642176,"available":21464252416,"internally_reserved":0,"allocated":417792,"data_stored":175484,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:56:10 2026","interfaces":[{"interface":"back","average":{"1min":0.504,"5min":0.504,"15min":0.504},"min":{"1min":0.30299999999999999,"5min":0.30299999999999999,"15min":0.30299999999999999},"max":{"1min":0.85899999999999999,"5min":0.85899999999999999,"15min":0.85899999999999999},"last":1.3540000000000001},{"interface":"front","average":{"1min":0.52000000000000002,"5min":0.52000000000000002,"15min":0.52000000000000002},"min":{"1min":0.222,"5min":0.222,"15min":0.222},"max":{"1min":0.73299999999999998,"5min":0.73299999999999998,"15min":0.73299999999999998},"last":1.038}]},{"osd":2,"last update":"Sun Mar 8 23:56:22 2026","interfaces":[{"interface":"back","average":{"1min":0.59199999999999997,"5min":0.59199999999999997,"15min":0.59199999999999997},"min":{"1min":0.33600000000000002,"5min":0.33600000000000002,"15min":0.33600000000000002},"max":{"1min":1.278,"5min":1.278,"15min":1.278},"last":1.278},{"interface":"front","average":{"1min":0.621,"5min":0.621,"15min":0.621},"min":{"1min":0.23300000000000001,"5min":0.23300000000000001,"15min":0.23300000000000001},"max":{"1min":1.343,"5min":1.343,"15min":1.343},"last":1.343}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3680000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2290000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.048}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.097}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3109999999999999}]}]},{"osd":0,"up_from":8,"seq":34359738386,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6708,"kb_used_data":812,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960716,"statfs":{"total":21470642176,"available":21463773184,"internally_reserved":0,"allocated":831488,"data_stored":574177,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Sun Mar 8 23:56:11 2026","interfaces":[{"interface":"back","average":{"1min":0.56399999999999995,"5min":0.56399999999999995,"15min":0.56399999999999995},"min":{"1min":0.28100000000000003,"5min":0.28100000000000003,"15min":0.28100000000000003},"max":{"1min":0.89000000000000001,"5min":0.89000000000000001,"15min":0.89000000000000001},"last":0.67200000000000004},{"interface":"front","average":{"1min":0.55500000000000005,"5min":0.55500000000000005,"15min":0.55500000000000005},"min":{"1min":0.222,"5min":0.222,"15min":0.222},"max":{"1min":0.85599999999999998,"5min":0.85599999999999998,"15min":0.85599999999999998},"last":0.498}]},{"osd":2,"last update":"Sun Mar 8 23:56:20 2026","interfaces":[{"interface":"back","average":{"1min":0.57099999999999995,"5min":0.57099999999999995,"15min":0.57099999999999995},"min":{"1min":0.29799999999999999,"5min":0.29799999999999999,"15min":0.29799999999999999},"max":{"1min":1.1779999999999999,"5min":1.1779999999999999,"15min":1.1779999999999999},"last":0.61199999999999999},{"interface":"front","average":{"1min":0.61399999999999999,"5min":0.61399999999999999,"15min":0.61399999999999999},"min":{"1min":0.222,"5min":0.222,"15min":0.222},"max":{"1min":1.1899999999999999,"5min":1.1899999999999999,"15min":1.1899999999999999},"last":0.66400000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.624}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55000000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48899999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64200000000000002}]}]},{"osd":2,"up_from":17,"seq":73014444046,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6184,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5760,"kb_avail":20961240,"statfs":{"total":21470642176,"available":21464309760,"internally_reserved":0,"allocated":425984,"data_stored":176079,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5898240},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:56:22 2026","interfaces":[{"interface":"back","average":{"1min":0.57999999999999996,"5min":0.57999999999999996,"15min":0.57999999999999996},"min":{"1min":0.29899999999999999,"5min":0.29899999999999999,"15min":0.29899999999999999},"max":{"1min":0.98699999999999999,"5min":0.98699999999999999,"15min":0.98699999999999999},"last":0.58799999999999997},{"interface":"front","average":{"1min":0.60899999999999999,"5min":0.60899999999999999,"15min":0.60899999999999999},"min":{"1min":0.308,"5min":0.308,"15min":0.308},"max":{"1min":1.1890000000000001,"5min":1.1890000000000001,"15min":1.1890000000000001},"last":0.495}]},{"osd":1,"last update":"Sun Mar 8 23:56:22 2026","interfaces":[{"interface":"back","average":{"1min":0.64100000000000001,"5min":0.64100000000000001,"15min":0.64100000000000001},"min":{"1min":0.32000000000000001,"5min":0.32000000000000001,"15min":0.32000000000000001},"max":{"1min":1.232,"5min":1.232,"15min":1.232},"last":0.71399999999999997},{"interface":"front","average":{"1min":0.55600000000000005,"5min":0.55600000000000005,"15min":0.55600000000000005},"min":{"1min":0.29399999999999998,"5min":0.29399999999999998,"15min":0.29399999999999998},"max":{"1min":1.0389999999999999,"5min":1.0389999999999999,"15min":1.0389999999999999},"last":0.52700000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54100000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61099999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47399999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56000000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60299999999999998}]}]},{"osd":3,"up_from":23,"seq":98784247820,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":176638,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42199999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68300000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.316}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76100000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.443}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.39000000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49299999999999999}]}]},{"osd":4,"up_from":27,"seq":115964117003,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5808,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961616,"statfs":{"total":21470642176,"available":21464694784,"internally_reserved":0,"allocated":434176,"data_stored":176638,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.98099999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56499999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55400000000000005}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51600000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78400000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81899999999999995}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":3.0289999999999999}]}]},{"osd":5,"up_from":32,"seq":137438953481,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":175742,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.5149999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.371}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.4590000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3939999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80400000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.501}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:56:29.190 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph pg dump --format=json 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3488909038' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4082841601' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1689734364' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3273542334' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3034054334' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-08T23:56:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:28 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/4016021000' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-08T23:56:29.368 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:29.764 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:29.769 INFO:teuthology.orchestra.run.vm04.stderr:dumped all 2026-03-08T23:56:29.834 INFO:teuthology.orchestra.run.vm04.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-08T23:56:27.821517+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48612,"kb_used_data":4516,"kb_used_omap":0,"kb_used_meta":44032,"kb_avail":167690780,"statfs":{"total":171765137408,"available":171715358720,"internally_reserved":0,"allocated":4624384,"data_stored":2602733,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45088768},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed
":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.971615"},"pg_stats":[{"pgid":"1.0","version":"45'87","reported_seq":56,"reported_epoch":45,"state":"active+clean","last_fresh":"2026-03-08T23:56:17.893191+0000","last_change":"2026-03-08T23:56:12.063955+0000","last_active":"2026-03-08T23:56:17.893191+0000","last_peered":"2026-03-08T23:56:17.893191+0000","last_clean":"2026-03-08T23:56:17.893191+0000","last_became_active":"2026-03-08T23:56:11.748820+0000","last_became_peered":"2026-03-08T23:56:11.748820+0000","last_unstale":"2026-03-08T23:56:17.893191+0000","last_undegraded":"2026-03-08T23:56:17.893191+0000","last_fullsized":"2026-03-08T23:56:17.893191+0000","mapping_epoch":43,"log_start":"0'0","ondisk_log_start":"0'0","created":18,"last_epoch_clean":44,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-08T23:55:20.090222+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-08T23:55:20.090222+0000","last_clean_scrub_stamp":"2026-03-08T23:55:20.090222+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T03:19:53.549043+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_k
eys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":42,"seq":180388626436,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6068,"kb_used_data":812,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961356,"statfs":{"total":21470642176,"available":21464428544,"internally_reserved":0,"allocated":831488,"data_stored":574163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80100000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66400000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76300000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78700000000000003}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65700000000000003}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.72199999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64100000000000001}]}]},{"osd":6,"up_from":37,"seq":158913789958,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6128,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961296,"statfs":{"total":21470642176,"available":21464367104,"internally_reserved":0,"allocated":827392,"data_stored":573812,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45400000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.57399999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68799999999999994}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.435}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.499}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.58499999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.99399999999999999}]}]},{"osd":1,"up_from":12,"seq":51539607568,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6240,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961184,"statfs":{"total":21470642176,"available":21464252416,"internally_reserved":0,"allocated":417792,"data_stored":175484,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:56:10 2026","interfaces":[{"interface":"back","average":{"1min":0.504,"5min":0.504,"15min":0.504},"min":{"1min":0.30299999999999999,"5min":0.30299999999999999,"15min":0.30299999999999999},"max":{"1min":0.85899999999999999,"5min":0.85899999999999999,"15min":0.85899999999999999},"last":1.3540000000000001},{"interface":"front","average":{"1min":0.52000000000000002,"5min":0.52000000000000002,"15min":0.52000000000000002},"min":{"1min":0.222,"5min":0.222,"15min":0.222},"max":{"1min":0.73299999999999998,"5min":0.73299999999999998,"15min":0.73299999999999998},"last":1.038}]},{"osd":2,"last update":"Sun Mar 8 23:56:22 2026","interfaces":[{"interface":"back","average":{"1min":0.59199999999999997,"5min":0.59199999999999997,"15min":0.59199999999999997},"min":{"1min":0.33600000000000002,"5min":0.33600000000000002,"15min":0.33600000000000002},"max":{"1min":1.278,"5min":1.278,"15min":1.278},"last":1.278},{"interface":"front","average":{"1min":0.621,"5min":0.621,"15min":0.621},"min":{"1min":0.23300000000000001,"5min":0.23300000000000001,"15min":0.23300000000000001},"max":{"1min":1.343,"5min":1.343,"15min":1.343},"last":1.343}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3680000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2290000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.048}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.097}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3109999999999999}]}]},{"osd":0,"up_from":8,"seq":34359738386,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6708,"kb_used_data":812,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960716,"statfs":{"total":21470642176,"available":21463773184,"internally_reserved":0,"allocated":831488,"data_stored":574177,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Sun Mar 8 23:56:11 2026","interfaces":[{"interface":"back","average":{"1min":0.56399999999999995,"5min":0.56399999999999995,"15min":0.56399999999999995},"min":{"1min":0.28100000000000003,"5min":0.28100000000000003,"15min":0.28100000000000003},"max":{"1min":0.89000000000000001,"5min":0.89000000000000001,"15min":0.89000000000000001},"last":0.67200000000000004},{"interface":"front","average":{"1min":0.55500000000000005,"5min":0.55500000000000005,"15min":0.55500000000000005},"min":{"1min":0.222,"5min":0.222,"15min":0.222},"max":{"1min":0.85599999999999998,"5min":0.85599999999999998,"15min":0.85599999999999998},"last":0.498}]},{"osd":2,"last update":"Sun Mar 8 23:56:20 2026","interfaces":[{"interface":"back","average":{"1min":0.57099999999999995,"5min":0.57099999999999995,"15min":0.57099999999999995},"min":{"1min":0.29799999999999999,"5min":0.29799999999999999,"15min":0.29799999999999999},"max":{"1min":1.1779999999999999,"5min":1.1779999999999999,"15min":1.1779999999999999},"last":0.61199999999999999},{"interface":"front","average":{"1min":0.61399999999999999,"5min":0.61399999999999999,"15min":0.61399999999999999},"min":{"1min":0.222,"5min":0.222,"15min":0.222},"max":{"1min":1.1899999999999999,"5min":1.1899999999999999,"15min":1.1899999999999999},"last":0.66400000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.624}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47599999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55000000000000004}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48899999999999999}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64200000000000002}]}]},{"osd":2,"up_from":17,"seq":73014444046,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6184,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5760,"kb_avail":20961240,"statfs":{"total":21470642176,"available":21464309760,"internally_reserved":0,"allocated":425984,"data_stored":176079,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5898240},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Sun Mar 8 23:56:22 2026","interfaces":[{"interface":"back","average":{"1min":0.57999999999999996,"5min":0.57999999999999996,"15min":0.57999999999999996},"min":{"1min":0.29899999999999999,"5min":0.29899999999999999,"15min":0.29899999999999999},"max":{"1min":0.98699999999999999,"5min":0.98699999999999999,"15min":0.98699999999999999},"last":0.58799999999999997},{"interface":"front","average":{"1min":0.60899999999999999,"5min":0.60899999999999999,"15min":0.60899999999999999},"min":{"1min":0.308,"5min":0.308,"15min":0.308},"max":{"1min":1.1890000000000001,"5min":1.1890000000000001,"15min":1.1890000000000001},"last":0.495}]},{"osd":1,"last update":"Sun Mar 8 23:56:22 2026","interfaces":[{"interface":"back","average":{"1min":0.64100000000000001,"5min":0.64100000000000001,"15min":0.64100000000000001},"min":{"1min":0.32000000000000001,"5min":0.32000000000000001,"15min":0.32000000000000001},"max":{"1min":1.232,"5min":1.232,"15min":1.232},"last":0.71399999999999997},{"interface":"front","average":{"1min":0.55600000000000005,"5min":0.55600000000000005,"15min":0.55600000000000005},"min":{"1min":0.29399999999999998,"5min":0.29399999999999998,"15min":0.29399999999999998},"max":{"1min":1.0389999999999999,"5min":1.0389999999999999,"15min":1.0389999999999999},"last":0.52700000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.54100000000000004}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.61099999999999999}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.47399999999999998}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56000000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60299999999999998}]}]},{"osd":3,"up_from":23,"seq":98784247820,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5744,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961680,"statfs":{"total":21470642176,"available":21464760320,"internally_reserved":0,"allocated":434176,"data_stored":176638,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42199999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68300000000000005}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.316}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.76100000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.443}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.39000000000000001}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49299999999999999}]}]},{"osd":4,"up_from":27,"seq":115964117003,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5808,"kb_used_data":424,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961616,"statfs":{"total":21470642176,"available":21464694784,"internally_reserved":0,"allocated":434176,"data_stored":176638,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.98099999999999998}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56499999999999995}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55400000000000005}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.51600000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.78400000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81899999999999995}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":3.0289999999999999}]}]},{"osd":5,"up_from":32,"seq":137438953481,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":175742,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.5149999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.371}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.4590000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.3939999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.80400000000000005}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.501}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-08T23:56:29.835 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-08T23:56:29.835 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-08T23:56:29.835 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-08T23:56:29.835 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph health --format=json 2026-03-08T23:56:29.947 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:29 vm10 ceph-mon[48982]: from='client.14523 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:56:30.012 INFO:teuthology.orchestra.run.vm04.stderr:Inferring config /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/mon.a/config 2026-03-08T23:56:30.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:29 vm04 ceph-mon[46823]: from='client.14523 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:56:30.043 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:29 vm04 ceph-mon[51053]: from='client.14523 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:56:30.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 systemd[1]: Starting Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-08T23:56:30.429 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-08T23:56:30.429 INFO:teuthology.orchestra.run.vm04.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-08T23:56:30.508 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-08T23:56:30.508 INFO:tasks.cephadm:Setup complete, yielding 2026-03-08T23:56:30.508 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-08T23:56:30.510 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm04.local 2026-03-08T23:56:30.510 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin realm create --rgw-realm=r --default' 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 podman[63886]: 2026-03-08 23:56:30.373891163 +0000 UTC m=+0.023614481 container create 9570e658ccd114778a7d09f8f20c1ab17f9e872371121206959b318ef4d4d8e4 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 podman[63886]: 2026-03-08 23:56:30.405516452 +0000 UTC m=+0.055239781 container init 9570e658ccd114778a7d09f8f20c1ab17f9e872371121206959b318ef4d4d8e4 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 podman[63886]: 2026-03-08 23:56:30.408184889 +0000 UTC m=+0.057908207 container start 9570e658ccd114778a7d09f8f20c1ab17f9e872371121206959b318ef4d4d8e4 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 bash[63886]: 9570e658ccd114778a7d09f8f20c1ab17f9e872371121206959b318ef4d4d8e4 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 podman[63886]: 2026-03-08 23:56:30.365489827 +0000 UTC m=+0.015213145 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 systemd[1]: Started Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.453Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.453Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.453Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.453Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm10 (none))" 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.453Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-08T23:56:30.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.453Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.454Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.455Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.455Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." 
http2=false 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.457Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.457Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.442µs 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.457Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.457Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.457Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=135.573µs wal_replay_duration=311.38µs total_replay_duration=459.887µs 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.458Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.458Z caller=main.go:947 level=info msg="TSDB started" 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.458Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.470Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=12.46497ms db_storage=972ns remote_storage=1.574µs web_handler=741ns query_engine=1.122µs scrape=2.870852ms scrape_sd=34.775µs notify=962ns notify_sd=1.513µs rules=9.260965ms 2026-03-08T23:56:30.829 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:56:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:56:30.470Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[51053]: from='client.24436 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[51053]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/392123035' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[46823]: from='client.24436 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[46823]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/392123035' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-08T23:56:31.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:30 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:30 vm10 ceph-mon[48982]: from='client.24436 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-08T23:56:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:30 vm10 ceph-mon[48982]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:30 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/392123035' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-08T23:56:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:30 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:32.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:31 vm10 ceph-mon[48982]: Deploying daemon alertmanager.a on vm04 2026-03-08T23:56:32.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:31 vm04 ceph-mon[51053]: Deploying daemon alertmanager.a on vm04 2026-03-08T23:56:32.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:31 vm04 ceph-mon[46823]: Deploying daemon alertmanager.a on vm04 2026-03-08T23:56:32.986 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-08T23:56:32.986 INFO:teuthology.orchestra.run.vm04.stdout: "id": "9f7e698e-39c6-493b-8124-1bd555e78347", 2026-03-08T23:56:32.986 INFO:teuthology.orchestra.run.vm04.stdout: "name": "r", 2026-03-08T23:56:32.986 INFO:teuthology.orchestra.run.vm04.stdout: "current_period": "e6112855-1729-4ded-8b84-2ccf75c809c3", 2026-03-08T23:56:32.986 INFO:teuthology.orchestra.run.vm04.stdout: "epoch": 1 2026-03-08T23:56:32.986 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-08T23:56:33.027 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zonegroup create --rgw-zonegroup=default --master --default' 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[46823]: pgmap v10: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[46823]: osdmap e46: 8 total, 8 up, 8 in 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/1696314687' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[51053]: pgmap v10: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[51053]: osdmap e46: 8 total, 8 up, 8 in 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1696314687' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-08T23:56:33.182 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:32 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:33.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:32 vm10 ceph-mon[48982]: pgmap v10: 1 pgs: 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:33.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:32 vm10 ceph-mon[48982]: osdmap e46: 8 total, 8 up, 8 in 2026-03-08T23:56:33.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:32 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1696314687' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-08T23:56:33.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:32 vm10 ceph-mon[48982]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-08T23:56:33.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:32 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:33.685 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-08T23:56:33.685 INFO:teuthology.orchestra.run.vm04.stdout: "id": "abd026c4-4605-4641-9f60-0695221707ac", 2026-03-08T23:56:33.685 INFO:teuthology.orchestra.run.vm04.stdout: "name": "default", 2026-03-08T23:56:33.685 INFO:teuthology.orchestra.run.vm04.stdout: "api_name": "default", 2026-03-08T23:56:33.685 INFO:teuthology.orchestra.run.vm04.stdout: "is_master": "true", 2026-03-08T23:56:33.685 INFO:teuthology.orchestra.run.vm04.stdout: "endpoints": [], 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "hostnames": [], 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "hostnames_s3website": [], 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "master_zone": "", 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "zones": [], 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "placement_targets": [], 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "default_placement": "", 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "realm_id": "9f7e698e-39c6-493b-8124-1bd555e78347", 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "sync_policy": { 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: "groups": [] 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:33.686 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-08T23:56:33.934 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default' 2026-03-08T23:56:34.066 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:34 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-08T23:56:34.066 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:34 vm04 ceph-mon[46823]: osdmap e47: 8 total, 8 up, 8 in 2026-03-08T23:56:34.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:34 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-08T23:56:34.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:34 vm04 ceph-mon[51053]: osdmap e47: 8 total, 8 up, 8 in 2026-03-08T23:56:34.306 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:34 vm10 ceph-mon[48982]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-08T23:56:34.306 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:34 vm10 ceph-mon[48982]: osdmap e47: 8 total, 8 up, 8 in 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "id": "f9cb6fe5-b225-4a57-a859-d82574b3c584", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "name": "z", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "domain_root": "z.rgw.meta:root", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "control_pool": "z.rgw.control", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "gc_pool": "z.rgw.log:gc", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "lc_pool": "z.rgw.log:lc", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "log_pool": "z.rgw.log", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "intent_log_pool": "z.rgw.log:intent", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "usage_log_pool": "z.rgw.log:usage", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "roles_pool": "z.rgw.meta:roles", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "reshard_pool": "z.rgw.log:reshard", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "user_keys_pool": "z.rgw.meta:users.keys", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "user_email_pool": "z.rgw.meta:users.email", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "user_swift_pool": "z.rgw.meta:users.swift", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "user_uid_pool": "z.rgw.meta:users.uid", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "otp_pool": "z.rgw.otp", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "system_key": { 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "access_key": "", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "secret_key": "" 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "placement_pools": [ 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "key": "default-placement", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "val": { 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "index_pool": "z.rgw.buckets.index", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "storage_classes": { 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "STANDARD": { 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "data_pool": "z.rgw.buckets.data" 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "data_extra_pool": "z.rgw.buckets.non-ec", 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: "index_type": 0 2026-03-08T23:56:34.472 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:34.473 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:34.473 INFO:teuthology.orchestra.run.vm04.stdout: ], 2026-03-08T23:56:34.473 
INFO:teuthology.orchestra.run.vm04.stdout: "realm_id": "9f7e698e-39c6-493b-8124-1bd555e78347", 2026-03-08T23:56:34.473 INFO:teuthology.orchestra.run.vm04.stdout: "notif_pool": "z.rgw.log:notif" 2026-03-08T23:56:34.473 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-08T23:56:34.511 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin period update --rgw-realm=r --commit' 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: pgmap v13: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: osdmap e48: 8 total, 8 up, 8 in 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:35 vm10 ceph-mon[48982]: Deploying daemon grafana.a on vm10 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: pgmap v13: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: osdmap e48: 8 total, 8 up, 8 in 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[51053]: Deploying daemon grafana.a on vm10 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: pgmap v13: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 47 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: osdmap e48: 8 total, 8 up, 8 in 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:56:35.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-08T23:56:35.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:35.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:35 vm04 ceph-mon[46823]: Deploying daemon grafana.a on vm10 2026-03-08T23:56:36.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:56:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[68647]: level=info ts=2026-03-08T23:56:36.109Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.001426933s 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[51053]: pgmap v15: 33 pgs: 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 1.8 KiB/s wr, 2 op/s 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[51053]: osdmap e49: 8 total, 8 up, 8 in 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1856232625' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[46823]: pgmap v15: 33 pgs: 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 1.8 KiB/s wr, 2 op/s 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[46823]: osdmap e49: 8 total, 8 up, 8 in 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/1856232625' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-08T23:56:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:37 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-08T23:56:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:37 vm10 ceph-mon[48982]: pgmap v15: 33 pgs: 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 1.8 KiB/s wr, 2 op/s 2026-03-08T23:56:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:37 vm10 ceph-mon[48982]: osdmap e49: 8 total, 8 up, 8 in 2026-03-08T23:56:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:37 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1856232625' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-08T23:56:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:37 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[51053]: osdmap e50: 8 total, 8 up, 8 in 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[51053]: osdmap e51: 8 total, 8 up, 8 in 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1856232625' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[46823]: osdmap e50: 8 total, 8 up, 8 in 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[46823]: osdmap e51: 8 total, 8 up, 8 in 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1856232625' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-08T23:56:38.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:38 vm04 ceph-mon[46823]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-08T23:56:38.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:38 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-08T23:56:38.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:38 vm10 ceph-mon[48982]: osdmap e50: 8 total, 8 up, 8 in 2026-03-08T23:56:38.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:38 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:38.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:38 vm10 ceph-mon[48982]: osdmap e51: 8 total, 8 up, 8 in 2026-03-08T23:56:38.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:38 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1856232625' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-08T23:56:38.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:38 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-08T23:56:39.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:56:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:56:39.328 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:56:39] "GET /metrics HTTP/1.1" 200 192201 "" "Prometheus/2.33.4" 2026-03-08T23:56:39.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:39 vm04 ceph-mon[51053]: pgmap v18: 65 pgs: 32 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 832 B/s rd, 2.2 KiB/s wr, 3 op/s 2026-03-08T23:56:39.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:39 vm04 ceph-mon[46823]: pgmap v18: 65 pgs: 32 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 832 B/s rd, 2.2 KiB/s wr, 3 op/s 2026-03-08T23:56:39.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:39 vm10 ceph-mon[48982]: pgmap v18: 65 pgs: 32 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 832 B/s rd, 2.2 KiB/s wr, 3 op/s 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[51053]: osdmap e52: 8 total, 8 up, 8 in 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[51053]: pgmap v21: 97 pgs: 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.5 KiB/s rd, 1.2 KiB/s wr, 8 op/s 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[51053]: osdmap e53: 8 total, 8 up, 8 in 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[46823]: osdmap e52: 8 total, 8 up, 8 in 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[46823]: pgmap v21: 97 pgs: 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.5 KiB/s rd, 1.2 KiB/s wr, 8 op/s 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[46823]: osdmap e53: 8 total, 8 up, 8 in 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-08T23:56:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:40 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-08T23:56:40.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:40 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-08T23:56:40.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:40 vm10 ceph-mon[48982]: osdmap e52: 8 total, 8 up, 8 in 2026-03-08T23:56:40.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:40 vm10 ceph-mon[48982]: pgmap v21: 97 pgs: 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.5 KiB/s rd, 1.2 KiB/s wr, 8 op/s 2026-03-08T23:56:40.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:40 vm10 ceph-mon[48982]: osdmap e53: 8 total, 8 up, 8 in 2026-03-08T23:56:40.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:40 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-08T23:56:40.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:40 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[51053]: osdmap e54: 8 total, 8 up, 8 in 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[51053]: pgmap v24: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.5 KiB/s rd, 1.2 KiB/s wr, 8 op/s 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[46823]: osdmap e54: 8 total, 8 up, 8 in 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-08T23:56:42.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:42 vm04 ceph-mon[46823]: pgmap v24: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.5 KiB/s rd, 1.2 KiB/s wr, 8 op/s 2026-03-08T23:56:42.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:42 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-08T23:56:42.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:42 vm10 ceph-mon[48982]: osdmap e54: 8 total, 8 up, 8 in 2026-03-08T23:56:42.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:42 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-08T23:56:42.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:42 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-08T23:56:42.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:42 vm10 ceph-mon[48982]: pgmap v24: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.5 KiB/s rd, 1.2 KiB/s wr, 8 op/s 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[51053]: osdmap e55: 8 total, 8 up, 8 in 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[51053]: osdmap e56: 8 total, 8 up, 8 in 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-08T23:56:43.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[46823]: osdmap e55: 8 total, 8 up, 8 in 2026-03-08T23:56:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-08T23:56:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-08T23:56:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-08T23:56:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:43 vm04 ceph-mon[46823]: osdmap e56: 8 total, 8 up, 8 in 2026-03-08T23:56:43.661 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-08T23:56:43.661 INFO:teuthology.orchestra.run.vm04.stdout: "id": "7b59697c-6318-450c-a2fa-42ae5b0af04e", 2026-03-08T23:56:43.661 INFO:teuthology.orchestra.run.vm04.stdout: "epoch": 1, 2026-03-08T23:56:43.661 INFO:teuthology.orchestra.run.vm04.stdout: "predecessor_uuid": "e6112855-1729-4ded-8b84-2ccf75c809c3", 2026-03-08T23:56:43.661 INFO:teuthology.orchestra.run.vm04.stdout: "sync_status": [], 2026-03-08T23:56:43.661 INFO:teuthology.orchestra.run.vm04.stdout: "period_map": { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "id": "7b59697c-6318-450c-a2fa-42ae5b0af04e", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "zonegroups": [ 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "id": "abd026c4-4605-4641-9f60-0695221707ac", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "name": "default", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "api_name": "default", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "is_master": "true", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "endpoints": [], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "hostnames": [], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "hostnames_s3website": [], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: 
"master_zone": "f9cb6fe5-b225-4a57-a859-d82574b3c584", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "zones": [ 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "id": "f9cb6fe5-b225-4a57-a859-d82574b3c584", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "name": "z", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "endpoints": [], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "log_meta": "false", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "log_data": "false", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "bucket_index_max_shards": 11, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "read_only": "false", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "tier_type": "", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "sync_from_all": "true", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "sync_from": [], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "redirect_zone": "" 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: ], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "placement_targets": [ 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "name": "default-placement", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "tags": [], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "storage_classes": [ 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "STANDARD" 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: ], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "default_placement": "default-placement", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "realm_id": "9f7e698e-39c6-493b-8124-1bd555e78347", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "sync_policy": { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "groups": [] 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: ], 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "short_zone_ids": [ 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "key": "f9cb6fe5-b225-4a57-a859-d82574b3c584", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "val": 4178596507 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "master_zonegroup": "abd026c4-4605-4641-9f60-0695221707ac", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "master_zone": "f9cb6fe5-b225-4a57-a859-d82574b3c584", 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "period_config": { 
2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "bucket_quota": { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "enabled": false, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "check_on_raw": false, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "max_size": -1, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "max_size_kb": 0, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "max_objects": -1 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "user_quota": { 2026-03-08T23:56:43.662 INFO:teuthology.orchestra.run.vm04.stdout: "enabled": false, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "check_on_raw": false, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_size": -1, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_size_kb": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_objects": -1 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "user_ratelimit": { 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_read_ops": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_write_ops": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_read_bytes": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_write_bytes": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "enabled": false 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "bucket_ratelimit": { 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_read_ops": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_write_ops": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_read_bytes": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_write_bytes": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "enabled": false 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "anonymous_ratelimit": { 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_read_ops": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_write_ops": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_read_bytes": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "max_write_bytes": 0, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "enabled": false 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "realm_id": "9f7e698e-39c6-493b-8124-1bd555e78347", 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "realm_name": "r", 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout: "realm_epoch": 2 2026-03-08T23:56:43.663 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-08T23:56:43.715 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k 
/etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000' 2026-03-08T23:56:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:43 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-08T23:56:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:43 vm10 ceph-mon[48982]: osdmap e55: 8 total, 8 up, 8 in 2026-03-08T23:56:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:43 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1339416265' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-08T23:56:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:43 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-08T23:56:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:43 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-08T23:56:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:43 vm10 ceph-mon[48982]: osdmap e56: 8 total, 8 up, 8 in 2026-03-08T23:56:44.197 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:56:44 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[68647]: level=info ts=2026-03-08T23:56:44.111Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.004353021s 2026-03-08T23:56:44.729 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled rgw.foo update... 
2026-03-08T23:56:44.783 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph osd pool create foo' 2026-03-08T23:56:44.979 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:44 vm04 ceph-mon[46823]: pgmap v27: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:44.979 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:44 vm04 ceph-mon[51053]: pgmap v27: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:45.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:44 vm10 ceph-mon[48982]: pgmap v27: 129 pgs: 32 unknown, 32 creating+peering, 65 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:56:45.765 INFO:teuthology.orchestra.run.vm04.stderr:pool 'foo' created 2026-03-08T23:56:45.814 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'rbd pool init foo' 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[46823]: from='client.24493 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[46823]: Saving service rgw.foo spec with placement count:2 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4163113932' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[51053]: from='client.24493 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:45.984 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[51053]: Saving service rgw.foo spec with placement count:2 2026-03-08T23:56:45.985 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:45.985 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4163113932' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-08T23:56:45.985 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:45 vm04 ceph-mon[51053]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-08T23:56:46.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:45 vm10 ceph-mon[48982]: from='client.24493 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:46.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:45 vm10 ceph-mon[48982]: Saving service rgw.foo spec with placement count:2 2026-03-08T23:56:46.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:45 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:46.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:45 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4163113932' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-08T23:56:46.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:45 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-08T23:56:47.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:46 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-08T23:56:47.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:46 vm10 ceph-mon[48982]: osdmap e57: 8 total, 8 up, 8 in 2026-03-08T23:56:47.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:46 vm10 ceph-mon[48982]: pgmap v29: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 2.0 KiB/s wr, 5 op/s 2026-03-08T23:56:47.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:46 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/226610314' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-08T23:56:47.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:46 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[51053]: osdmap e57: 8 total, 8 up, 8 in 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[51053]: pgmap v29: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 2.0 KiB/s wr, 5 op/s 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/226610314' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[46823]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[46823]: osdmap e57: 8 total, 8 up, 8 in 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[46823]: pgmap v29: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 2.0 KiB/s wr, 5 op/s 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/226610314' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-08T23:56:47.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:46 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-08T23:56:48.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:47 vm10 ceph-mon[48982]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-08T23:56:48.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:47 vm10 ceph-mon[48982]: osdmap e58: 8 total, 8 up, 8 in 2026-03-08T23:56:48.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:47 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-08T23:56:48.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:47 vm04 ceph-mon[51053]: osdmap e58: 8 total, 8 up, 8 in 2026-03-08T23:56:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:47 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-08T23:56:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:47 vm04 ceph-mon[46823]: osdmap e58: 8 total, 8 up, 8 in 2026-03-08T23:56:48.839 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply iscsi foo u p' 2026-03-08T23:56:49.029 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:48 vm04 ceph-mon[46823]: osdmap e59: 8 total, 8 up, 8 in 2026-03-08T23:56:49.029 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:48 vm04 ceph-mon[46823]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 2.0 KiB/s wr, 5 op/s 2026-03-08T23:56:49.029 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:48 vm04 ceph-mon[51053]: osdmap e59: 8 total, 8 up, 8 in 2026-03-08T23:56:49.029 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:48 vm04 ceph-mon[51053]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 2.0 KiB/s wr, 5 op/s 2026-03-08T23:56:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:48 vm10 ceph-mon[48982]: osdmap e59: 8 total, 8 up, 8 in 2026-03-08T23:56:49.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:48 vm10 ceph-mon[48982]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 2.0 KiB/s wr, 5 op/s 2026-03-08T23:56:49.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:48 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:56:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:56:49.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:49 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:56:49] "GET /metrics HTTP/1.1" 200 197495 "" "Prometheus/2.33.4" 2026-03-08T23:56:49.376 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled iscsi.foo update... 2026-03-08T23:56:49.430 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-08T23:56:50.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:49 vm10 ceph-mon[48982]: osdmap e60: 8 total, 8 up, 8 in 2026-03-08T23:56:50.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:49 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:50.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:49 vm04 ceph-mon[51053]: osdmap e60: 8 total, 8 up, 8 in 2026-03-08T23:56:50.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:49 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:50.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:49 vm04 ceph-mon[46823]: osdmap e60: 8 total, 8 up, 8 in 2026-03-08T23:56:50.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:49 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:51.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:50 vm10 ceph-mon[48982]: from='client.24511 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:51.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:50 vm10 ceph-mon[48982]: Saving service iscsi.foo spec with placement count:1 2026-03-08T23:56:51.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:50 vm10 ceph-mon[48982]: pgmap v34: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 501 B/s wr, 0 op/s 2026-03-08T23:56:51.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:50 vm04 ceph-mon[51053]: from='client.24511 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:51.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:50 vm04 ceph-mon[51053]: Saving service iscsi.foo spec with placement count:1 2026-03-08T23:56:51.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:50 vm04 ceph-mon[51053]: pgmap v34: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 501 B/s wr, 0 op/s 2026-03-08T23:56:51.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:50 vm04 ceph-mon[46823]: from='client.24511 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:56:51.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:50 vm04 ceph-mon[46823]: Saving service iscsi.foo spec with placement count:1 2026-03-08T23:56:51.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:50 vm04 ceph-mon[46823]: pgmap v34: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 501 B/s wr, 0 op/s 2026-03-08T23:56:53.077 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:52 vm10 ceph-mon[48982]: pgmap v35: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s wr, 0 op/s 2026-03-08T23:56:53.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:52 vm04 ceph-mon[51053]: pgmap v35: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s wr, 0 op/s 2026-03-08T23:56:53.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:52 vm04 ceph-mon[46823]: pgmap v35: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 341 B/s wr, 0 op/s 2026-03-08T23:56:53.963 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" 2026-03-08T23:56:54.218 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" 2026-03-08T23:56:54.218 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" 2026-03-08T23:56:54.218 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" 2026-03-08T23:56:54.219 
INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column has_acl in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index dashboard_org_id_uid" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_slug" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning v1 to v2" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="drop dashboard_provisioning_tmp_qwerty" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted dashboards" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:53+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" 2026-03-08T23:56:54.219 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json data column" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: 
t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update initial version to 1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index datasource_org_id_uid" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index datasource_org_id_is_default" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.key" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" 2026-03-08T23:56:54.221 
INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" 2026-03-08T23:56:54.221 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: 
t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: 
t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist table v2" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" 
2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" 2026-03-08T23:56:54.222 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 
vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index notification_journal org_id & alert_id & notifier_id" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification_state table v1" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_notification_state_alert_id" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old annotation table v4" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column region_id to annotation table" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column tags to annotation table" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v2" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" 
logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" 2026-03-08T23:56:54.223 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing 
migration" logger=migrator id="Make epoch_end the same as epoch" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Move region to single row" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Set dashboard version to 1 where 0" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_version.data to mediumtext v1" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" 2026-03-08T23:56:54.227 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external to team_member table" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 
lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="save default acl rules in dashboard_acl table" 2026-03-08T23:56:54.228 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index 
IDX_login_attempt_username - v1" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" 2026-03-08T23:56:54.229 
INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user auth token" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition table" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and uid columns" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" 2026-03-08T23:56:54.229 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" 
logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on 
alert_instance" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" 
logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table 2026-03-08T23:56:54.230 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in alert_configuration" 2026-03-08T23:56:54.231 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" 2026-03-08T23:56:54.232 
INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id=create_ngalert_configuration_table 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index library_element org_id_uid" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" 
2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create team role table" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_role_org_id_team_id_role_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: 
t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" 2026-03-08T23:56:54.232 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.name" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.org_id" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" 
2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_role_name" 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=330.748205ms 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=warn msg="[Deprecated] the datasource provisioning config is outdated. 
please upgrade" logger=provisioning.datasources filename=/etc/grafana/provisioning/datasources/ceph-dashboard.yml 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-08T23:56:54.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-08T23:56:54.580 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 08 23:56:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-08T23:56:54+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-08T23:56:54.993 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:54 vm10 ceph-mon[48982]: pgmap v36: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 289 B/s wr, 0 op/s 2026-03-08T23:56:54.993 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:54 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:54.993 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:54 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:54.994 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:54 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:54.994 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:54 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:54.994 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:54 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[51053]: pgmap v36: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 289 B/s wr, 0 op/s 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:54 vm04 
ceph-mon[46823]: pgmap v36: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 289 B/s wr, 0 op/s 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:55.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:54 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.240 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: Saving service rgw.foo spec with placement count:2 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: Deploying daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: 
from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:56 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: Saving service rgw.foo spec with placement count:2 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: Deploying daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: 
from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: Saving service rgw.foo spec with placement count:2 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: Deploying daemon 
rgw.foo.vm04.ehrfsf on vm04 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:56.242 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:56 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:57 vm10 ceph-mon[48982]: pgmap v37: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 253 B/s wr, 0 op/s 2026-03-08T23:56:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:57 vm10 ceph-mon[48982]: Deploying daemon rgw.foo.vm10.dwizvi on vm10 2026-03-08T23:56:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:57 vm10 ceph-mon[48982]: osdmap e61: 8 total, 8 up, 8 in 2026-03-08T23:56:57.545 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:57 vm04 ceph-mon[46823]: pgmap v37: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 253 B/s wr, 0 op/s 2026-03-08T23:56:57.545 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:57 vm04 ceph-mon[46823]: Deploying daemon rgw.foo.vm10.dwizvi on vm10 2026-03-08T23:56:57.545 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:57 vm04 ceph-mon[46823]: osdmap e61: 8 total, 8 up, 8 in 2026-03-08T23:56:57.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:57 vm04 ceph-mon[51053]: pgmap v37: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 253 B/s wr, 0 op/s 2026-03-08T23:56:57.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:57 vm04 ceph-mon[51053]: Deploying daemon rgw.foo.vm10.dwizvi on vm10 2026-03-08T23:56:57.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:57 vm04 ceph-mon[51053]: osdmap e61: 8 total, 8 up, 8 in 2026-03-08T23:56:58.651 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:58.651 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: Checking pool "foo" exists for service iscsi.foo 2026-03-08T23:56:58.651 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" 
prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-08T23:56:58.651 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: Deploying daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: Checking pool "foo" exists for service iscsi.foo 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: Deploying daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-08T23:56:58.652 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:58 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:58.726 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: Checking pool "foo" exists for service iscsi.foo 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: Deploying daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-08T23:56:58.726 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:58 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:59.079 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:56:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:56:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:56:59.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:56:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:56:59] "GET /metrics HTTP/1.1" 200 197495 "" "Prometheus/2.33.4" 2026-03-08T23:56:59.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:59 vm10 ceph-mon[48982]: pgmap v39: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 226 B/s wr, 0 op/s 2026-03-08T23:56:59.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:59 vm10 ceph-mon[48982]: mgrmap e20: y(active, since 40s), standbys: x 2026-03-08T23:56:59.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:59 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:59.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:59 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:59.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:59 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:59.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:56:59 vm10 
ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:59.694 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[46823]: pgmap v39: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 226 B/s wr, 0 op/s 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[46823]: mgrmap e20: y(active, since 40s), standbys: x 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[51053]: pgmap v39: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 226 B/s wr, 0 op/s 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[51053]: mgrmap e20: y(active, since 40s), standbys: x 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:56:59.695 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:56:59 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:57:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:00 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2371341634' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-08T23:57:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:00 vm10 ceph-mon[48982]: pgmap v40: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 3.4 KiB/s wr, 160 op/s 2026-03-08T23:57:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:00 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/79228715' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/392608918"}]: dispatch 2026-03-08T23:57:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:00 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:00 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:00 vm10 ceph-mon[48982]: Checking dashboard <-> RGW credentials 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2371341634' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[46823]: pgmap v40: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 3.4 KiB/s wr, 160 op/s 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/79228715' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/392608918"}]: dispatch 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[46823]: Checking dashboard <-> RGW credentials 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2371341634' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[51053]: pgmap v40: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 3.4 KiB/s wr, 160 op/s 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/79228715' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/392608918"}]: dispatch 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:00 vm04 ceph-mon[51053]: Checking dashboard <-> RGW credentials 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/79228715' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/392608918"}]': finished 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: osdmap e62: 8 total, 8 up, 8 in 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/4084036102' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]: dispatch 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]: dispatch 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-08T23:57:01.431 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[46823]: Reconfiguring daemon alertmanager.a on vm04 2026-03-08T23:57:01.431 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 systemd[1]: Stopping Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:57:01.431 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 bash[71697]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager.a" found: no such container 2026-03-08T23:57:01.434 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/79228715' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/392608918"}]': finished 2026-03-08T23:57:01.573 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: osdmap e62: 8 total, 8 up, 8 in 2026-03-08T23:57:01.573 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4084036102' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]: dispatch 2026-03-08T23:57:01.574 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]: dispatch 2026-03-08T23:57:01.574 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.574 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.574 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.574 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-08T23:57:01.574 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:01 vm04 ceph-mon[51053]: Reconfiguring daemon alertmanager.a on vm04 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[68647]: level=info ts=2026-03-08T23:57:01.489Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 
2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71705]: 2026-03-08 23:57:01.500983336 +0000 UTC m=+0.045632603 container died 03f62be4feda35afbe9ddbd90c4eec54d36e7e0ac989e1bdf100dfb6ed2826a3 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71705]: 2026-03-08 23:57:01.525763516 +0000 UTC m=+0.070412783 container remove 03f62be4feda35afbe9ddbd90c4eec54d36e7e0ac989e1bdf100dfb6ed2826a3 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71705]: 2026-03-08 23:57:01.526696087 +0000 UTC m=+0.071345354 volume remove 7f8e47d58e929cf71a22a1122f158a66b794ebb3c865ed168aff636c9e875c0c 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 bash[71705]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 bash[71725]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager.a" found: no such container 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@alertmanager.a.service: Deactivated successfully. 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 systemd[1]: Stopped Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-08T23:57:01.574 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 systemd[1]: Starting Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:57:01.698 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71785]: 2026-03-08 23:57:01.672027507 +0000 UTC m=+0.017239573 volume create 3456f770698786c6551bea6918a8bdb294854f0d4690afa6a6d170bda388a7b3 2026-03-08T23:57:01.698 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71785]: 2026-03-08 23:57:01.674321303 +0000 UTC m=+0.019533369 container create 77a61b512c93e705733ba9dc92af6207681afabdae4d63dee204546bcb635ab7 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/79228715' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/392608918"}]': finished 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: osdmap e62: 8 total, 8 up, 8 in 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4084036102' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]: dispatch 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]: dispatch 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-08T23:57:01.764 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:01 vm10 ceph-mon[48982]: Reconfiguring daemon alertmanager.a on vm04 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71785]: 2026-03-08 23:57:01.70327077 +0000 UTC m=+0.048482846 container init 77a61b512c93e705733ba9dc92af6207681afabdae4d63dee204546bcb635ab7 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71785]: 2026-03-08 23:57:01.705528047 +0000 UTC m=+0.050740113 container start 77a61b512c93e705733ba9dc92af6207681afabdae4d63dee204546bcb635ab7 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 bash[71785]: 77a61b512c93e705733ba9dc92af6207681afabdae4d63dee204546bcb635ab7 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 podman[71785]: 2026-03-08 23:57:01.665011993 +0000 UTC m=+0.010224059 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 systemd[1]: Started Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.720Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)" 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.720Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)" 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.721Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.104 port=9094 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.722Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.743Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.744Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.748Z caller=main.go:518 msg=Listening address=:9093 2026-03-08T23:57:02.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:01.748Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 systemd[1]: Stopping Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 bash[65834]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus.a" found: no such container 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.242Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.243Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.243Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[63897]: ts=2026-03-08T23:57:02.243Z caller=main.go:1066 level=info msg="See you next time!" 2026-03-08T23:57:02.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 podman[65841]: 2026-03-08 23:57:02.253593783 +0000 UTC m=+0.027776878 container died 9570e658ccd114778a7d09f8f20c1ab17f9e872371121206959b318ef4d4d8e4 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]': finished 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: osdmap e63: 8 total, 8 up, 8 in 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1557704601' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]: dispatch 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]: dispatch 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: Reconfiguring daemon prometheus.a on vm10 2026-03-08T23:57:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:02 vm10 ceph-mon[48982]: pgmap v43: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 132 KiB/s rd, 5.7 KiB/s wr, 266 op/s 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 podman[65841]: 2026-03-08 23:57:02.272916995 +0000 UTC m=+0.047100080 container remove 9570e658ccd114778a7d09f8f20c1ab17f9e872371121206959b318ef4d4d8e4 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 bash[65841]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 bash[65858]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus.a" found: no such container 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service: Deactivated successfully. 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 systemd[1]: Stopped Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 systemd[1]: Starting Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 podman[65902]: 2026-03-08 23:57:02.418939563 +0000 UTC m=+0.020861708 container create 60b4398433db55f3f63cc439bd8d81cb927296c1d68ccf45c47864b2281a6b2d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 podman[65902]: 2026-03-08 23:57:02.448239484 +0000 UTC m=+0.050161629 container init 60b4398433db55f3f63cc439bd8d81cb927296c1d68ccf45c47864b2281a6b2d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 podman[65902]: 2026-03-08 23:57:02.450997206 +0000 UTC m=+0.052919351 container start 60b4398433db55f3f63cc439bd8d81cb927296c1d68ccf45c47864b2281a6b2d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 bash[65902]: 60b4398433db55f3f63cc439bd8d81cb927296c1d68ccf45c47864b2281a6b2d 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 podman[65902]: 2026-03-08 23:57:02.410137767 +0000 UTC m=+0.012059913 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 systemd[1]: Started Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.498Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.498Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.499Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.499Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm10 (none))" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.499Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.499Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.499Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.500Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.502Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.502Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.032µs 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.502Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-08T23:57:02.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:02.502Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." http2=false 2026-03-08T23:57:02.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]': finished 2026-03-08T23:57:02.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: osdmap e63: 8 total, 8 up, 8 in 2026-03-08T23:57:02.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1557704601' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]: dispatch 2026-03-08T23:57:02.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]: dispatch 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: Reconfiguring daemon prometheus.a on vm10 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[46823]: pgmap v43: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 132 KiB/s rd, 5.7 KiB/s wr, 266 op/s 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1056094101"}]': finished 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: osdmap e63: 8 total, 8 up, 8 in 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1557704601' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]: dispatch 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]: dispatch 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: Reconfiguring daemon prometheus.a on vm10 2026-03-08T23:57:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:02 vm04 ceph-mon[51053]: pgmap v43: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail; 132 KiB/s rd, 5.7 KiB/s wr, 266 op/s 2026-03-08T23:57:03.540 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]': finished 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: osdmap e64: 8 total, 8 up, 8 in 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.104:9093"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.104:9093"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: Adding iSCSI gateway http://:@192.168.123.104:5000 to Dashboard 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm04"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm04"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.110:9095"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.110:9095"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.110:3000"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.110:3000"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3909050210' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]: dispatch 2026-03-08T23:57:03.541 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:03 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]': finished 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: osdmap e64: 8 total, 8 up, 8 in 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.104:9093"}]: dispatch 2026-03-08T23:57:03.543 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.104:9093"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: Adding iSCSI gateway http://:@192.168.123.104:5000 to Dashboard 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm04"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm04"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.110:9095"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.110:9095"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.110:3000"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.110:3000"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3909050210' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2797711501"}]': finished 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: osdmap e64: 8 total, 8 up, 8 in 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.104:9093"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.104:9093"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-08T23:57:03.544 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: Adding iSCSI gateway http://:@192.168.123.104:5000 to Dashboard 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm04"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm04"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.110:9095"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.110:9095"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.110:3000"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.110:3000"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3909050210' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]: dispatch 2026-03-08T23:57:03.545 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:03 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:03.725Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.002874953s 2026-03-08T23:57:04.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.927Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=1 2026-03-08T23:57:04.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.929Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=1 2026-03-08T23:57:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.929Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=25.839µs wal_replay_duration=1.426370648s total_replay_duration=1.426508938s 2026-03-08T23:57:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.931Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-08T23:57:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.931Z caller=main.go:947 level=info msg="TSDB started" 2026-03-08T23:57:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.931Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-08T23:57:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.943Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=11.71615ms db_storage=762ns remote_storage=2.385µs web_handler=621ns query_engine=1.071µs scrape=592.788µs scrape_sd=19.807µs notify=18.244µs notify_sd=4.387µs rules=10.785772ms 2026-03-08T23:57:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 08 23:57:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-08T23:57:03.943Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]': finished 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: osdmap e65: 8 total, 8 up, 8 in 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: Checking dashboard <-> RGW credentials 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3769795002' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]: dispatch 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]: dispatch 2026-03-08T23:57:04.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: pgmap v46: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:57:04.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:04 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]': finished 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: osdmap e65: 8 total, 8 up, 8 in 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: Checking dashboard <-> RGW credentials 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3769795002' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]: dispatch 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]: dispatch 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: pgmap v46: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/285198153"}]': finished 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: osdmap e65: 8 total, 8 up, 8 in 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: Checking dashboard <-> RGW credentials 2026-03-08T23:57:04.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3769795002' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]: dispatch 2026-03-08T23:57:04.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]: dispatch 2026-03-08T23:57:04.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: pgmap v46: 161 pgs: 161 active+clean; 456 KiB data, 57 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:57:04.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:04 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:57:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:05 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]': finished 2026-03-08T23:57:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:05 vm10 ceph-mon[48982]: osdmap e66: 8 total, 8 up, 8 in 2026-03-08T23:57:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:05 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1111116869' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]: dispatch 2026-03-08T23:57:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:05 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]: dispatch 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]': finished 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[51053]: osdmap e66: 8 total, 8 up, 8 in 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1111116869' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]: dispatch 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[51053]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]: dispatch 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/2282925904"}]': finished 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[46823]: osdmap e66: 8 total, 8 up, 8 in 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1111116869' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]: dispatch 2026-03-08T23:57:06.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:05 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]: dispatch 2026-03-08T23:57:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:06 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]': finished 2026-03-08T23:57:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:06 vm10 ceph-mon[48982]: osdmap e67: 8 total, 8 up, 8 in 2026-03-08T23:57:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:06 vm10 ceph-mon[48982]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 59 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 767 B/s wr, 127 op/s 2026-03-08T23:57:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:06 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1818866265' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]: dispatch 2026-03-08T23:57:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:06 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]: dispatch 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]': finished 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[51053]: osdmap e67: 8 total, 8 up, 8 in 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[51053]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 59 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 767 B/s wr, 127 op/s 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1818866265' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]: dispatch 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]: dispatch 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[46823]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1020657381"}]': finished 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[46823]: osdmap e67: 8 total, 8 up, 8 in 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[46823]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 59 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 767 B/s wr, 127 op/s 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1818866265' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]: dispatch 2026-03-08T23:57:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:06 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]: dispatch 2026-03-08T23:57:08.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:07 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]': finished 2026-03-08T23:57:08.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:07 vm10 ceph-mon[48982]: osdmap e68: 8 total, 8 up, 8 in 2026-03-08T23:57:08.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:07 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2497858210' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]: dispatch 2026-03-08T23:57:08.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:07 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]: dispatch 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]': finished 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[51053]: osdmap e68: 8 total, 8 up, 8 in 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2497858210' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]: dispatch 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]: dispatch 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4258028872"}]': finished 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[46823]: osdmap e68: 8 total, 8 up, 8 in 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/2497858210' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]: dispatch 2026-03-08T23:57:08.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:07 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]: dispatch 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]': finished 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[51053]: osdmap e69: 8 total, 8 up, 8 in 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[51053]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 59 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 767 B/s wr, 127 op/s 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1787852001' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]: dispatch 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]: dispatch 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]': finished 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[46823]: osdmap e69: 8 total, 8 up, 8 in 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[46823]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 59 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 767 B/s wr, 127 op/s 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1787852001' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]: dispatch 2026-03-08T23:57:09.035 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:08 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]: dispatch 2026-03-08T23:57:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:08 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3816463695"}]': finished 2026-03-08T23:57:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:08 vm10 ceph-mon[48982]: osdmap e69: 8 total, 8 up, 8 in 2026-03-08T23:57:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:08 vm10 ceph-mon[48982]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 59 MiB used, 160 GiB / 160 GiB avail; 79 KiB/s rd, 767 B/s wr, 127 op/s 2026-03-08T23:57:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:08 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/1787852001' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]: dispatch 2026-03-08T23:57:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:08 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]: dispatch 2026-03-08T23:57:09.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:57:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:09] "GET /metrics HTTP/1.1" 200 207563 "" "Prometheus/2.33.4" 2026-03-08T23:57:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:09 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]': finished 2026-03-08T23:57:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:09 vm10 ceph-mon[48982]: osdmap e70: 8 total, 8 up, 8 in 2026-03-08T23:57:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:09 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3120246100' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1723664430"}]: dispatch 2026-03-08T23:57:10.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:09 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:10.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]': finished 2026-03-08T23:57:10.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[51053]: osdmap e70: 8 total, 8 up, 8 in 2026-03-08T23:57:10.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3120246100' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1723664430"}]: dispatch 2026-03-08T23:57:10.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:10.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2988861187"}]': finished 2026-03-08T23:57:10.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[46823]: osdmap e70: 8 total, 8 up, 8 in 2026-03-08T23:57:10.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3120246100' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1723664430"}]: dispatch 2026-03-08T23:57:10.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:09 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:10 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3120246100' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1723664430"}]': finished 2026-03-08T23:57:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:10 vm10 ceph-mon[48982]: osdmap e71: 8 total, 8 up, 8 in 2026-03-08T23:57:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:10 vm10 ceph-mon[48982]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail; 6.7 KiB/s rd, 6 op/s 2026-03-08T23:57:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:10 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4126175466' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1056094101"}]: dispatch 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3120246100' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1723664430"}]': finished 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[51053]: osdmap e71: 8 total, 8 up, 8 in 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[51053]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail; 6.7 KiB/s rd, 6 op/s 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4126175466' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1056094101"}]: dispatch 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3120246100' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1723664430"}]': finished 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[46823]: osdmap e71: 8 total, 8 up, 8 in 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[46823]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail; 6.7 KiB/s rd, 6 op/s 2026-03-08T23:57:11.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4126175466' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1056094101"}]: dispatch 2026-03-08T23:57:12.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:11 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-08T23:57:11.728Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.006525524s 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4126175466' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1056094101"}]': finished 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[51053]: osdmap e72: 8 total, 8 up, 8 in 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3961302729' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]: dispatch 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]: dispatch 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4126175466' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1056094101"}]': finished 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[46823]: osdmap e72: 8 total, 8 up, 8 in 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3961302729' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]: dispatch 2026-03-08T23:57:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:11 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]: dispatch 2026-03-08T23:57:12.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:11 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4126175466' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1056094101"}]': finished 2026-03-08T23:57:12.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:11 vm10 ceph-mon[48982]: osdmap e72: 8 total, 8 up, 8 in 2026-03-08T23:57:12.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:11 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3961302729' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]: dispatch 2026-03-08T23:57:12.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:11 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]: dispatch 2026-03-08T23:57:13.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:12 vm10 ceph-mon[48982]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail; 6.6 KiB/s rd, 6 op/s 2026-03-08T23:57:13.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:12 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]': finished 2026-03-08T23:57:13.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:12 vm10 ceph-mon[48982]: osdmap e73: 8 total, 8 up, 8 in 2026-03-08T23:57:13.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:12 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/2662093209' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1412084899"}]: dispatch 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[51053]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail; 6.6 KiB/s rd, 6 op/s 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]': finished 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[51053]: osdmap e73: 8 total, 8 up, 8 in 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2662093209' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1412084899"}]: dispatch 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[46823]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail; 6.6 KiB/s rd, 6 op/s 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2663707440"}]': finished 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[46823]: osdmap e73: 8 total, 8 up, 8 in 2026-03-08T23:57:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:12 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2662093209' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1412084899"}]: dispatch 2026-03-08T23:57:14.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:13 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2662093209' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1412084899"}]': finished 2026-03-08T23:57:14.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:13 vm10 ceph-mon[48982]: osdmap e74: 8 total, 8 up, 8 in 2026-03-08T23:57:14.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:13 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3795401505' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]: dispatch 2026-03-08T23:57:14.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:13 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]: dispatch 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2662093209' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1412084899"}]': finished 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[51053]: osdmap e74: 8 total, 8 up, 8 in 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3795401505' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]: dispatch 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]: dispatch 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2662093209' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1412084899"}]': finished 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[46823]: osdmap e74: 8 total, 8 up, 8 in 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3795401505' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]: dispatch 2026-03-08T23:57:14.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:13 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]: dispatch 2026-03-08T23:57:15.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:15 vm10 ceph-mon[48982]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:57:15.606 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:15 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]': finished 2026-03-08T23:57:15.606 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:15 vm10 ceph-mon[48982]: osdmap e75: 8 total, 8 up, 8 in 2026-03-08T23:57:15.606 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:15 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1705012411' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]: dispatch 2026-03-08T23:57:15.606 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:15 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]: dispatch 2026-03-08T23:57:15.606 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[51053]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]': finished 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[51053]: osdmap e75: 8 total, 8 up, 8 in 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1705012411' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]: dispatch 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]: dispatch 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[46823]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 60 MiB used, 160 GiB / 160 GiB avail 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/2282925904"}]': finished 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[46823]: osdmap e75: 8 total, 8 up, 8 in 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1705012411' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]: dispatch 2026-03-08T23:57:15.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:15 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]: dispatch 2026-03-08T23:57:16.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:16 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]': finished 2026-03-08T23:57:16.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:16 vm10 ceph-mon[48982]: osdmap e76: 8 total, 8 up, 8 in 2026-03-08T23:57:16.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:16 vm10 ceph-mon[48982]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 61 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:16.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:16 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2128625150' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/3942489037"}]: dispatch 2026-03-08T23:57:16.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]': finished 2026-03-08T23:57:16.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[51053]: osdmap e76: 8 total, 8 up, 8 in 2026-03-08T23:57:16.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[51053]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 61 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:16.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2128625150' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/3942489037"}]: dispatch 2026-03-08T23:57:16.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[46823]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/3942489037"}]': finished 2026-03-08T23:57:16.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[46823]: osdmap e76: 8 total, 8 up, 8 in 2026-03-08T23:57:16.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[46823]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 61 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:16.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:16 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2128625150' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/3942489037"}]: dispatch 2026-03-08T23:57:17.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:17 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2128625150' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/3942489037"}]': finished 2026-03-08T23:57:17.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:17 vm10 ceph-mon[48982]: osdmap e77: 8 total, 8 up, 8 in 2026-03-08T23:57:17.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:17 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2128625150' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/3942489037"}]': finished 2026-03-08T23:57:17.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:17 vm04 ceph-mon[51053]: osdmap e77: 8 total, 8 up, 8 in 2026-03-08T23:57:17.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:17 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2128625150' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/3942489037"}]': finished 2026-03-08T23:57:17.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:17 vm04 ceph-mon[46823]: osdmap e77: 8 total, 8 up, 8 in 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 61 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.827 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 61 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd 
pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 61 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]: dispatch 2026-03-08T23:57:18.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 
ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:57:19.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:57:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:57:19.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:57:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:19] "GET /metrics HTTP/1.1" 200 207659 "" "Prometheus/2.33.4" 2026-03-08T23:57:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]': finished 2026-03-08T23:57:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]': finished 2026-03-08T23:57:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished 2026-03-08T23:57:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:19 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]': finished 2026-03-08T23:57:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:19 vm10 ceph-mon[48982]: osdmap e78: 8 total, 8 up, 8 in 2026-03-08T23:57:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:19 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]': finished 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]': finished 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd 
pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]': finished 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[51053]: osdmap e78: 8 total, 8 up, 8 in 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:19.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.0", "id": [7, 2]}]': finished 2026-03-08T23:57:19.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.e", "id": [1, 2]}]': finished 2026-03-08T23:57:19.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.f", "id": [1, 2]}]': finished 2026-03-08T23:57:19.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1f", "id": [1, 2]}]': finished 2026-03-08T23:57:19.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[46823]: osdmap e78: 8 total, 8 up, 8 in 2026-03-08T23:57:19.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:19 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:20.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:20 vm10 ceph-mon[48982]: osdmap e79: 8 total, 8 up, 8 in 2026-03-08T23:57:20.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:20 vm10 ceph-mon[48982]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:20.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:20 vm04 ceph-mon[51053]: osdmap e79: 8 total, 8 up, 8 in 2026-03-08T23:57:20.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:20 vm04 ceph-mon[51053]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:20.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:20 vm04 ceph-mon[46823]: osdmap e79: 8 total, 8 up, 8 in 2026-03-08T23:57:20.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:20 vm04 ceph-mon[46823]: pgmap v68: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:23.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:23 vm10 ceph-mon[48982]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:23.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:23 vm04 ceph-mon[51053]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:23.384 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:22 vm04 ceph-mon[46823]: pgmap v69: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:23 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:23.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:23.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:23.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:23.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:26.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:25 vm10 ceph-mon[48982]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 700 B/s rd, 0 op/s 2026-03-08T23:57:26.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:25 vm04 ceph-mon[51053]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 700 B/s rd, 0 op/s 2026-03-08T23:57:26.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:25 vm04 ceph-mon[46823]: pgmap v70: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 700 B/s rd, 0 op/s 2026-03-08T23:57:27.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:26 vm10 ceph-mon[48982]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:27.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:26 vm04 ceph-mon[51053]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:27.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:26 vm04 ceph-mon[46823]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:29.227 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:57:29 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:29] "GET /metrics HTTP/1.1" 200 207659 "" 
"Prometheus/2.33.4" 2026-03-08T23:57:29.227 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:28 vm04 ceph-mon[51053]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:29.227 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:28 vm04 ceph-mon[46823]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:28 vm10 ceph-mon[48982]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:29.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:57:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:57:30.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:30 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:30.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:30 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:30.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:30 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:31 vm10 ceph-mon[48982]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 995 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:31 vm04 ceph-mon[51053]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 995 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:31 vm04 ceph-mon[46823]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 995 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:32.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:32 vm10 ceph-mon[48982]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:32.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:32 vm04 ceph-mon[51053]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:32.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:32 vm04 ceph-mon[46823]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:57:33.504Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate 
certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:57:33.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:33.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:33.506Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:33.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:33.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:34 vm10 ceph-mon[48982]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:34 vm04 ceph-mon[51053]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 
2026-03-08T23:57:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:34 vm04 ceph-mon[46823]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:37.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:36 vm10 ceph-mon[48982]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:36 vm04 ceph-mon[51053]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:36 vm04 ceph-mon[46823]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-08T23:57:39.237 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:57:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:39] "GET /metrics HTTP/1.1" 200 207645 "" "Prometheus/2.33.4" 2026-03-08T23:57:39.237 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:38 vm04 ceph-mon[51053]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:39.237 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:38 vm04 ceph-mon[46823]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:39.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:38 vm10 ceph-mon[48982]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:39.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:57:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:57:40.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:39 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:40.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:39 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:40.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:39 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:40 vm10 ceph-mon[48982]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:40 vm04 ceph-mon[51053]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:40 vm04 ceph-mon[46823]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:42.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:42 vm10 ceph-mon[48982]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 67 
MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:42.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:42 vm04 ceph-mon[51053]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:42.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:42 vm04 ceph-mon[46823]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:57:43.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:57:43.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:43.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:43.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 
23:57:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:43.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:45.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:44 vm10 ceph-mon[48982]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:44 vm04 ceph-mon[51053]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:45.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:44 vm04 ceph-mon[46823]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:47.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:46 vm10 ceph-mon[48982]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:47.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:46 vm04 ceph-mon[51053]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:47.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:46 vm04 ceph-mon[46823]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:49.247 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:57:49 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:49] "GET /metrics HTTP/1.1" 200 207618 "" "Prometheus/2.33.4" 2026-03-08T23:57:49.247 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:48 vm04 ceph-mon[51053]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:49.247 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:48 vm04 ceph-mon[46823]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:49.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:48 vm10 ceph-mon[48982]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:49.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:57:48 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:57:50.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:49 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:50.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:49 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:50.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:49 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:57:51.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 
23:57:50 vm10 ceph-mon[48982]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:51.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:50 vm04 ceph-mon[51053]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:51.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:50 vm04 ceph-mon[46823]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:52.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:52 vm10 ceph-mon[48982]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:52.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:52 vm04 ceph-mon[51053]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:52.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:52 vm04 ceph-mon[46823]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:57:53.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:57:53.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:53.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:53.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:57:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:57:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:57:53.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:57:55.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:55 vm10 ceph-mon[48982]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:55 vm04 ceph-mon[51053]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:55.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:55 vm04 ceph-mon[46823]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:57.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:56 vm10 ceph-mon[48982]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:57.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:56 vm04 ceph-mon[51053]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:57.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:56 vm04 ceph-mon[46823]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:57:59.258 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:57:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:59] "GET /metrics HTTP/1.1" 200 207618 "" "Prometheus/2.33.4" 2026-03-08T23:57:59.258 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:57:58 vm04 ceph-mon[51053]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:59.258 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:57:58 vm04 ceph-mon[46823]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:59.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:57:58 vm10 ceph-mon[48982]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:57:59.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:57:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:57:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 
2026-03-08T23:58:01.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:00 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:01.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:00 vm10 ceph-mon[48982]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:01.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:00 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:01.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:00 vm04 ceph-mon[51053]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:01.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:00 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:01.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:00 vm04 ceph-mon[46823]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:03.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:02 vm10 ceph-mon[48982]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:03.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:02 vm04 ceph-mon[51053]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:03.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:02 vm04 ceph-mon[46823]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:03.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:03.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:03 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:03.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:03.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:05.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:04 vm10 ceph-mon[48982]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:05.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:04 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:58:05.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:04 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:58:05.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:04 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:58:05.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[46823]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:05.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:58:05.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:58:05.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:04 vm04 
ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:58:05.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[51053]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:05.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:58:05.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:58:05.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:04 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:58:06.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:06 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:06 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:06 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:06 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:06 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:06 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:06 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:06 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:06.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:06 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:58:07.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:07 vm04 ceph-mon[46823]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:07.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:07 vm04 ceph-mon[51053]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:07.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:07 vm10 ceph-mon[48982]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:08.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:08 vm10 ceph-mon[48982]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:08.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:08 vm04 ceph-mon[46823]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:08.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:08 vm04 ceph-mon[51053]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:09.327 
INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:58:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:58:09.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:58:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:09] "GET /metrics HTTP/1.1" 200 207594 "" "Prometheus/2.33.4" 2026-03-08T23:58:11.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:11 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:11.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:11 vm10 ceph-mon[48982]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:11.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:11 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:11.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:11 vm04 ceph-mon[46823]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:11.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:11 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:11.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:11 vm04 ceph-mon[51053]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:13.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:12 vm10 ceph-mon[48982]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:12 vm04 ceph-mon[46823]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:12 vm04 ceph-mon[51053]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:13.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:13.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:13.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:15.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:14 vm10 ceph-mon[48982]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:14 vm04 ceph-mon[46823]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:14 vm04 ceph-mon[51053]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:17.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:16 vm10 ceph-mon[48982]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:17.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:16 vm04 ceph-mon[46823]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:16 vm04 
ceph-mon[51053]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:17 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:18.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:17 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:18.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:17 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:18.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:17 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:18.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:58:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:17 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:58:19.278 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:58:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:19] "GET /metrics HTTP/1.1" 200 207621 "" "Prometheus/2.33.4" 2026-03-08T23:58:19.278 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:18 vm04 ceph-mon[46823]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 
B/s rd, 0 op/s 2026-03-08T23:58:19.278 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:18 vm04 ceph-mon[51053]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:19.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:18 vm10 ceph-mon[48982]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:19.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:58:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:58:21.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:20 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:21.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:20 vm10 ceph-mon[48982]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:21.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:20 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:21.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:20 vm04 ceph-mon[46823]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:20 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:20 vm04 ceph-mon[51053]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:23.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:22 vm10 ceph-mon[48982]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:23.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:22 vm04 ceph-mon[51053]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:23.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:22 vm04 ceph-mon[46823]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:23.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:23.509Z 
caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:23.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:23.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:23.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:23.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:25.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:24 vm10 ceph-mon[48982]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:24 vm04 ceph-mon[51053]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:25.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:24 vm04 ceph-mon[46823]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:27.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:26 vm10 ceph-mon[48982]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:27.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:26 vm04 ceph-mon[51053]: pgmap v101: 161 pgs: 161 active+clean; 
457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:26 vm04 ceph-mon[46823]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:29.289 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:58:29 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:29] "GET /metrics HTTP/1.1" 200 207621 "" "Prometheus/2.33.4" 2026-03-08T23:58:29.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:28 vm04 ceph-mon[51053]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:29.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:28 vm04 ceph-mon[46823]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:28 vm10 ceph-mon[48982]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:29.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:58:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:58:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:30 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:30 vm10 ceph-mon[48982]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:30 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:30 vm04 ceph-mon[51053]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:30 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:30 vm04 ceph-mon[46823]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:33.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:32 vm10 ceph-mon[48982]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:33.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:32 vm04 ceph-mon[51053]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:33.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:32 vm04 ceph-mon[46823]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:33.509Z 
caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:33.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:35.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:34 vm10 ceph-mon[48982]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:35.350 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:34 vm04 ceph-mon[51053]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:34 vm04 ceph-mon[46823]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:37.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:36 vm10 ceph-mon[48982]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:36 vm04 ceph-mon[51053]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:36 vm04 ceph-mon[46823]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:39.298 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:58:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:39] "GET /metrics HTTP/1.1" 200 207641 "" "Prometheus/2.33.4" 2026-03-08T23:58:39.298 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:38 vm04 ceph-mon[51053]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:39.298 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:38 vm04 ceph-mon[46823]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:39.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:38 vm10 ceph-mon[48982]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:39.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:58:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:58:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:40 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:40 vm10 ceph-mon[48982]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:40 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:40 vm04 ceph-mon[51053]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:40 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:40 vm04 ceph-mon[46823]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:43.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:42 vm10 
ceph-mon[48982]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:43.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:42 vm04 ceph-mon[51053]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:43.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:42 vm04 ceph-mon[46823]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:43.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:43.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:43.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:43.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 
2026-03-08T23:58:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:45.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:44 vm10 ceph-mon[48982]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:44 vm04 ceph-mon[51053]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:45.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:44 vm04 ceph-mon[46823]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:47.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:46 vm10 ceph-mon[48982]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:47.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:46 vm04 ceph-mon[51053]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:47.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:46 vm04 ceph-mon[46823]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:49.308 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:58:49 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:49] "GET /metrics HTTP/1.1" 200 207646 "" "Prometheus/2.33.4" 2026-03-08T23:58:49.309 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:48 vm04 ceph-mon[51053]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:49.309 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:48 vm04 ceph-mon[46823]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:49.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:49 vm10 ceph-mon[48982]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:49.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:58:48 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:58:51.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:51 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:51.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:51 vm10 ceph-mon[48982]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:51.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:51 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: 
dispatch 2026-03-08T23:58:51.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:51 vm04 ceph-mon[46823]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:51.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:51 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:58:51.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:51 vm04 ceph-mon[51053]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:53.511 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:53 vm04 ceph-mon[51053]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:53.511 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-mon[46823]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:53.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:53 vm10 ceph-mon[48982]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:53.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:58:53.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:53.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:53.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard 
integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:58:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:58:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:58:53.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:58:55.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:55 vm10 ceph-mon[48982]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:55 vm04 ceph-mon[51053]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:55.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:55 vm04 ceph-mon[46823]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:58:57.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:57 vm10 ceph-mon[48982]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:57 vm04 ceph-mon[51053]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:57 vm04 ceph-mon[46823]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:59.319 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:58:59 vm04 ceph-mon[46823]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:59.320 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:58:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:58:59] "GET /metrics HTTP/1.1" 200 207646 "" "Prometheus/2.33.4" 2026-03-08T23:58:59.320 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:58:59 vm04 ceph-mon[51053]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:59.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:58:59 vm10 ceph-mon[48982]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:58:59.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:58:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: 
::ffff:192.168.123.110 - - [08/Mar/2026:23:58:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:59:01.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:01 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:01.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:01 vm10 ceph-mon[48982]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:01 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:01 vm04 ceph-mon[51053]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:01.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:01 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:01.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:01 vm04 ceph-mon[46823]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:03.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:02 vm04 ceph-mon[51053]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:03.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:02 vm04 ceph-mon[46823]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:03.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:02 vm10 ceph-mon[48982]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:03.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:03.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:03.850 
INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:03.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:05.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:04 vm10 ceph-mon[48982]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:05.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:04 vm04 ceph-mon[51053]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:05.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:04 vm04 ceph-mon[46823]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:05 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:59:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:05 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:59:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:05 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:59:06.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:05 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:59:06.101 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:05 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:59:06.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:05 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:59:06.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:05 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:59:06.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:05 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-08T23:59:06.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:05 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-08T23:59:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:06 vm10 ceph-mon[48982]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:06 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:06 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:06 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[46823]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[51053]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:07.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:06 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:09.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:08 vm10 ceph-mon[48982]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:09.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:59:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:59:09.328 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:59:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - 
[08/Mar/2026:23:59:09] "GET /metrics HTTP/1.1" 200 207634 "" "Prometheus/2.33.4" 2026-03-08T23:59:09.329 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:08 vm04 ceph-mon[46823]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:09.329 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:08 vm04 ceph-mon[51053]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:11.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:10 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:11.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:10 vm10 ceph-mon[48982]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:11.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:10 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:11.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:10 vm04 ceph-mon[46823]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:11.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:10 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:11.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:10 vm04 ceph-mon[51053]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:12 vm04 ceph-mon[51053]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:13.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:12 vm04 ceph-mon[46823]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:13.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:12 vm10 ceph-mon[48982]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:13.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:13.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:13.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:13.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:15.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:14 vm10 ceph-mon[48982]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:14 vm04 ceph-mon[51053]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:14 vm04 ceph-mon[46823]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:17.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:16 vm10 ceph-mon[48982]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:16 vm04 ceph-mon[51053]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:17.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:16 vm04 
ceph-mon[46823]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:17 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:17 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:17 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:17 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:18.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:17 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-08T23:59:19.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:18 vm10 ceph-mon[48982]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:19.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:59:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:18] "GET /metrics HTTP/1.1" 200 - "" 
"Prometheus/2.33.4" 2026-03-08T23:59:19.337 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:59:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:19] "GET /metrics HTTP/1.1" 200 207608 "" "Prometheus/2.33.4" 2026-03-08T23:59:19.337 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:18 vm04 ceph-mon[51053]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:19.338 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:18 vm04 ceph-mon[46823]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:20 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:20 vm10 ceph-mon[48982]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:20 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:20 vm04 ceph-mon[51053]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:20 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:20 vm04 ceph-mon[46823]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:23.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:22 vm04 ceph-mon[51053]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:23.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:22 vm04 ceph-mon[46823]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:23.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:22 vm10 ceph-mon[48982]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:23.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error 
ts=2026-03-08T23:59:23.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:23.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:23.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:25.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:24 vm10 ceph-mon[48982]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:24 vm04 ceph-mon[51053]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:25.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:24 vm04 ceph-mon[46823]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:27.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:26 vm10 ceph-mon[48982]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:27.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:26 vm04 ceph-mon[51053]: pgmap v131: 
161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:26 vm04 ceph-mon[46823]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:28 vm10 ceph-mon[48982]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:29.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:59:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:59:29.347 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:59:29 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:29] "GET /metrics HTTP/1.1" 200 207608 "" "Prometheus/2.33.4" 2026-03-08T23:59:29.347 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:28 vm04 ceph-mon[51053]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:29.348 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:28 vm04 ceph-mon[46823]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:30 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:30 vm10 ceph-mon[48982]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:30 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:30 vm04 ceph-mon[51053]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:31.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:30 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:31.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:30 vm04 ceph-mon[46823]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:33.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:32 vm04 ceph-mon[51053]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:32 vm04 ceph-mon[46823]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:33.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:32 vm10 ceph-mon[48982]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error 
ts=2026-03-08T23:59:33.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:33.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:33.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:33.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:35.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:34 vm10 ceph-mon[48982]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-08T23:59:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:34 vm04 ceph-mon[51053]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:34 vm04 ceph-mon[46823]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:37.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:36 vm10 ceph-mon[48982]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:36 vm04 ceph-mon[51053]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:36 vm04 ceph-mon[46823]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:39.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:38 vm10 ceph-mon[48982]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:39.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:59:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:59:39.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:38 vm04 ceph-mon[46823]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:39.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:59:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:39] "GET /metrics HTTP/1.1" 200 207571 "" "Prometheus/2.33.4" 2026-03-08T23:59:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:38 vm04 ceph-mon[51053]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:40 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:40 vm10 ceph-mon[48982]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:40 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:40 vm04 ceph-mon[46823]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:41.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:40 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-08T23:59:41.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:40 vm04 ceph-mon[51053]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:43.100 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:42 vm04 ceph-mon[46823]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:43.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:42 vm04 ceph-mon[51053]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:43.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:42 vm10 ceph-mon[48982]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:43.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:43.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:43.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 
192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:43.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:45.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:44 vm10 ceph-mon[48982]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:45.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:44 vm04 ceph-mon[46823]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:44 vm04 ceph-mon[51053]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-08T23:59:47.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:46 vm10 ceph-mon[48982]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:47.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:46 vm04 ceph-mon[46823]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:47.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:46 vm04 ceph-mon[51053]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:49 vm10 ceph-mon[48982]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:49.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:59:48 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:59:49.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:59:49 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:49] "GET /metrics HTTP/1.1" 200 207576 "" "Prometheus/2.33.4" 2026-03-08T23:59:49.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:49 vm04 ceph-mon[46823]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:49 vm04 ceph-mon[51053]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:49.814 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force' 2026-03-08T23:59:50.335 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 
fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force'
2026-03-08T23:59:50.867 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set global log_to_journald false --force'
2026-03-08T23:59:51.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:51 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-08T23:59:51.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:51 vm04 ceph-mon[51053]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-08T23:59:51.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:51 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-08T23:59:51.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:51 vm04 ceph-mon[46823]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-08T23:59:51.490 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-08T23:59:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:51 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-08T23:59:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:51 vm10 ceph-mon[48982]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (2m) 45s ago 3m 25.7M - ba2b418f427c 77a61b512c93
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (2m) 45s ago 2m 48.8M - 8.3.5 dad864ee21e9 a1c7daa645e2
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 45s ago 2m 46.8M - 3.5 e1d6a67b021e 5cbf21b64379
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443 running (5m) 45s ago 5m 418M - 17.2.0 e1d6a67b021e 4eb30174d7d9
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:9283 running (5m) 45s ago 5m 468M - 17.2.0 e1d6a67b021e 428d867911a5
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (5m) 45s ago 5m 54.0M 2048M 17.2.0 e1d6a67b021e a0a441d060f5
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (5m) 45s ago 5m 51.8M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (5m) 45s ago 5m 49.0M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (3m) 45s ago 3m 19.4M - 1dbe0e931976 ff6d0adb33b7
2026-03-08T23:59:51.985 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (3m) 45s ago 3m 19.1M - 1dbe0e931976 c055c663d1e8
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (4m) 45s ago 4m 52.6M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (4m) 45s ago 4m 49.6M 4096M 17.2.0 e1d6a67b021e f112f05700b8
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (4m) 45s ago 4m 47.2M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (4m) 45s ago 4m 47.7M 4096M 17.2.0 e1d6a67b021e d530f6e786d9
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (4m) 45s ago 4m 49.1M 4096M 17.2.0 e1d6a67b021e ad302e6f363c
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (4m) 45s ago 4m 47.2M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (3m) 45s ago 3m 46.4M 4096M 17.2.0 e1d6a67b021e 168db5828111
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (3m) 45s ago 3m 49.7M 4096M 17.2.0 e1d6a67b021e bc6bbac15079
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 45s ago 3m 56.4M - 514e6a882f6e 60b4398433db
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (2m) 45s ago 2m 91.4M - 17.2.0 e1d6a67b021e a815abb0c790
2026-03-08T23:59:51.986 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (2m) 45s ago 2m 89.4M - 17.2.0 e1d6a67b021e f6412acdf6e0
2026-03-08T23:59:52.059 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "mon": {
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": {
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "osd": {
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {},
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": {
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-08T23:59:52.563 INFO:teuthology.orchestra.run.vm04.stdout: "overall": {
2026-03-08T23:59:52.564 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15
2026-03-08T23:59:52.564 INFO:teuthology.orchestra.run.vm04.stdout: }
2026-03-08T23:59:52.564 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-03-08T23:59:52.613 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s'
2026-03-08T23:59:53.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:52 vm04 ceph-mon[46823]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-08T23:59:53.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:52 vm04 ceph-mon[46823]: from='client.24736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-08T23:59:53.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:52 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3725793734' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-08T23:59:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:52 vm04 ceph-mon[51053]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-08T23:59:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:52 vm04 ceph-mon[51053]: from='client.24736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-08T23:59:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:52 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3725793734' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: cluster:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: id: fdcbddf6-1b49-11f1-80b0-7392062373f9
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: health: HEALTH_OK
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: services:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: mon: 3 daemons, quorum a,c,b (age 5m)
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: mgr: y(active, since 3m), standbys: x
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: osd: 8 osds: 8 up (since 3m), 8 in (since 3m)
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: rgw: 2 daemons active (2 hosts, 1 zones)
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: data:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: pools: 6 pools, 161 pgs
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: objects: 209 objects, 457 KiB
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: usage: 67 MiB used, 160 GiB / 160 GiB avail
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: pgs: 161 active+clean
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: io:
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr
2026-03-08T23:59:53.103 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-08T23:59:53.180 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls'
2026-03-08T23:59:53.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:52 vm10 ceph-mon[48982]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-08T23:59:53.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:52 vm10 ceph-mon[48982]: from='client.24736 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-08T23:59:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:52 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3725793734' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-08T23:59:53.651 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:53.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:53.652 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:53.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:53.652 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:53.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:53.652 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-08T23:59:53.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-08T23:59:53.652 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:53.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-08T23:59:53.652 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-08T23:59:53.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 
because it doesn't contain any IP SANs"
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager ?:9093,9094 1/1 47s ago 3m vm04=a;count:1
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:grafana ?:3000 1/1 47s ago 3m vm10=a;count:1
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo 1/1 47s ago 3m count:1
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:mgr 2/2 47s ago 5m vm04=y;vm10=x;count:2
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:mon 3/3 47s ago 5m vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b;count:3
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter ?:9100 2/2 47s ago 3m vm04=a;vm10=b;count:2
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:osd 8 47s ago -
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:prometheus ?:9095 1/1 47s ago 3m vm10=a;count:1
2026-03-08T23:59:53.652 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo ?:8000 2/2 47s ago 2m count:2
2026-03-08T23:59:53.724 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1'
2026-03-08T23:59:53.902 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:53 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1501515283' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-08T23:59:53.902 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:53 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1501515283' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-08T23:59:54.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:53 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1501515283' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch
2026-03-08T23:59:54.438 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to redeploy mgr.x on host 'vm10'
2026-03-08T23:59:54.490 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps --refresh'
2026-03-08T23:59:54.998 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[46823]: from='client.24754 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-08T23:59:54.998 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[46823]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-08T23:59:54.998 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1296842652' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch
2026-03-08T23:59:54.998 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y'
2026-03-08T23:59:54.999 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-08T23:59:54.999 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[51053]: from='client.24754 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-08T23:59:54.999 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[51053]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-08T23:59:54.999 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1296842652' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch
2026-03-08T23:59:54.999 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y'
2026-03-08T23:59:54.999 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:54 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (2m) 48s ago 3m 25.7M - ba2b418f427c 77a61b512c93
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (3m) 48s ago 3m 48.8M - 8.3.5 dad864ee21e9 a1c7daa645e2
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 48s ago 2m 46.8M - 3.5 e1d6a67b021e 5cbf21b64379
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443 running (5m) 48s ago 5m 418M - 17.2.0 e1d6a67b021e 4eb30174d7d9
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:9283 running (5m) 48s ago 5m 468M - 17.2.0 e1d6a67b021e 428d867911a5
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (5m) 48s ago 5m 54.0M 2048M 17.2.0 e1d6a67b021e a0a441d060f5
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (5m) 48s ago 5m 51.8M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (5m) 48s ago 5m 49.0M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (3m) 48s ago 3m 19.4M - 1dbe0e931976 ff6d0adb33b7
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (3m) 48s ago 3m 19.1M - 1dbe0e931976 c055c663d1e8
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (4m) 48s ago 4m 52.6M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (4m) 48s ago 4m 49.6M 4096M 17.2.0 e1d6a67b021e f112f05700b8
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (4m) 48s ago 4m 47.2M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (4m) 48s ago 4m 47.7M 4096M 17.2.0 e1d6a67b021e d530f6e786d9
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (4m) 48s ago 4m 49.1M 4096M 17.2.0 e1d6a67b021e ad302e6f363c
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (4m) 48s ago 4m 47.2M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (4m) 48s ago 4m 46.4M 4096M 17.2.0 e1d6a67b021e 168db5828111
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (3m) 48s ago 3m 49.7M 4096M 17.2.0 e1d6a67b021e bc6bbac15079
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 48s ago 3m 56.4M - 514e6a882f6e 60b4398433db
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (2m) 48s ago 2m 91.4M - 17.2.0 e1d6a67b021e a815abb0c790
2026-03-08T23:59:55.001 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (2m) 48s ago 2m 89.4M - 17.2.0 e1d6a67b021e f6412acdf6e0
2026-03-08T23:59:55.051 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180'
2026-03-08T23:59:55.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:54 vm10 ceph-mon[48982]: from='client.24754 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-08T23:59:55.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:54 vm10 ceph-mon[48982]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-08T23:59:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:54 vm10 ceph-mon[48982]: from='client.?
192.168.123.104:0/1296842652' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-08T23:59:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:54 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-08T23:59:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:54 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-08T23:59:56.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:55 vm10 ceph-mon[48982]: from='client.24763 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:59:56.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:55 vm10 ceph-mon[48982]: Schedule redeploy daemon mgr.x 2026-03-08T23:59:56.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:55 vm10 ceph-mon[48982]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:59:56.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:55 vm04 ceph-mon[51053]: from='client.24763 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:59:56.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:55 vm04 ceph-mon[51053]: Schedule redeploy daemon mgr.x 2026-03-08T23:59:56.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:55 vm04 ceph-mon[51053]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:59:56.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:55 vm04 ceph-mon[46823]: from='client.24763 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.x", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:59:56.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:55 vm04 ceph-mon[46823]: Schedule redeploy daemon mgr.x 2026-03-08T23:59:56.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:55 vm04 ceph-mon[46823]: from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-08T23:59:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:56 vm10 ceph-mon[48982]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:56 vm04 ceph-mon[51053]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:56 vm04 ceph-mon[46823]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:59.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 08 23:59:58 vm10 ceph-mon[48982]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:59.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 08 23:59:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:58] "GET /metrics 
HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-08T23:59:59.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 08 23:59:58 vm04 ceph-mon[51053]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 08 23:59:58 vm04 ceph-mon[46823]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-08T23:59:59.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 08 23:59:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [08/Mar/2026:23:59:59] "GET /metrics HTTP/1.1" 200 207576 "" "Prometheus/2.33.4" 2026-03-09T00:00:01.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:00 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:01.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:00 vm10 ceph-mon[48982]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:01.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:00 vm10 ceph-mon[48982]: overall HEALTH_OK 2026-03-09T00:00:01.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:00 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:01.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:00 vm04 ceph-mon[46823]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:01.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:00 vm04 ceph-mon[46823]: overall HEALTH_OK 2026-03-09T00:00:01.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:00 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:01.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:00 vm04 ceph-mon[51053]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:01.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:00 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-09T00:00:03.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:02 vm10 ceph-mon[48982]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:03.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:02 vm04 ceph-mon[46823]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:03.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:02 vm04 ceph-mon[51053]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 67 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:03.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: 
notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:03.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:03.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:03.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:03.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:03.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:05.179 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:04 vm04 ceph-mon[46823]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 42 op/s 2026-03-09T00:00:05.180 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:04 vm04 ceph-mon[51053]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 42 op/s 2026-03-09T00:00:05.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:04 vm10 ceph-mon[48982]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB 
data, 71 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 42 op/s 2026-03-09T00:00:07.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:07 vm10 ceph-mon[48982]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-09T00:00:07.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:07 vm04 ceph-mon[46823]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-09T00:00:07.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:07 vm04 ceph-mon[51053]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-09T00:00:09.224 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:00:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:00:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:09 vm04 ceph-mon[51053]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-09T00:00:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:09 vm04 ceph-mon[46823]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-09T00:00:09.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:00:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:09] "GET /metrics HTTP/1.1" 200 207573 "" "Prometheus/2.33.4" 2026-03-09T00:00:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:09 vm10 ceph-mon[48982]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 71 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 0 B/s wr, 41 op/s 2026-03-09T00:00:11.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:11 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:11.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:11 vm10 ceph-mon[48982]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 80 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 0 B/s wr, 79 op/s 2026-03-09T00:00:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:11 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:11 vm04 ceph-mon[46823]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 80 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 0 B/s wr, 79 op/s 2026-03-09T00:00:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:11 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:11 vm04 ceph-mon[51053]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 80 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 0 B/s wr, 79 op/s 2026-03-09T00:00:13.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:12 vm10 ceph-mon[48982]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 80 MiB used, 160 GiB / 160 GiB avail; 50 KiB/s rd, 0 B/s wr, 79 op/s 2026-03-09T00:00:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:12 vm04 
ceph-mon[46823]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 80 MiB used, 160 GiB / 160 GiB avail; 50 KiB/s rd, 0 B/s wr, 79 op/s 2026-03-09T00:00:13.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:12 vm04 ceph-mon[51053]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 80 MiB used, 160 GiB / 160 GiB avail; 50 KiB/s rd, 0 B/s wr, 79 op/s 2026-03-09T00:00:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:13.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:13.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:13.528Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn 
ts=2026-03-09T00:00:13.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:15.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:14 vm10 ceph-mon[48982]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 90 op/s 2026-03-09T00:00:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:14 vm04 ceph-mon[46823]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 90 op/s 2026-03-09T00:00:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:14 vm04 ceph-mon[51053]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 90 op/s 2026-03-09T00:00:17.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:17 vm10 ceph-mon[48982]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 49 op/s 2026-03-09T00:00:17.608 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:17 vm04 ceph-mon[46823]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 49 op/s 2026-03-09T00:00:17.608 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:17 vm04 ceph-mon[51053]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 49 op/s 2026-03-09T00:00:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[46823]: from='mgr.24298 ' 
entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:00:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:00:19.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:19 vm10 ceph-mon[48982]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 49 op/s 2026-03-09T00:00:19.343 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:00:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:00:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:19 vm04 ceph-mon[46823]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 49 op/s 2026-03-09T00:00:19.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:00:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:19] "GET /metrics HTTP/1.1" 200 207684 "" "Prometheus/2.33.4" 2026-03-09T00:00:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:19 vm04 ceph-mon[51053]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 0 B/s wr, 49 op/s 2026-03-09T00:00:22.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:21 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:22.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:21 vm10 ceph-mon[48982]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 56 op/s 2026-03-09T00:00:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:21 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:21 vm04 ceph-mon[51053]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 56 op/s 2026-03-09T00:00:22.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:21 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:22.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:21 vm04 ceph-mon[46823]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 
160 GiB / 160 GiB avail; 38 KiB/s rd, 0 B/s wr, 56 op/s 2026-03-09T00:00:23.522 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-mon[46823]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 0 B/s wr, 18 op/s 2026-03-09T00:00:23.522 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:23 vm04 ceph-mon[51053]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 0 B/s wr, 18 op/s 2026-03-09T00:00:23.522 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:23.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:23 vm10 ceph-mon[48982]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 0 B/s wr, 18 op/s 2026-03-09T00:00:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:23.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:23.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:23.528Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:23.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" 
attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:23.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:25.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:25 vm10 ceph-mon[48982]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 18 op/s 2026-03-09T00:00:25.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:25 vm04 ceph-mon[46823]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 18 op/s 2026-03-09T00:00:25.606 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:25 vm04 ceph-mon[51053]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 18 op/s 2026-03-09T00:00:26.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:26 vm04 ceph-mon[46823]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:26 vm04 ceph-mon[51053]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:27.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:26 vm10 ceph-mon[48982]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:28.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:28 vm10 ceph-mon[48982]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:28.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:28 vm04 ceph-mon[46823]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:28.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:28 vm04 ceph-mon[51053]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.2 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:29.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:00:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:00:29.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:00:29 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:29] "GET /metrics HTTP/1.1" 200 207684 "" "Prometheus/2.33.4" 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[46823]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 
84 MiB used, 160 GiB / 160 GiB avail; 5.6 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:31 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:31 vm10 ceph-mon[48982]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.6 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:31 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:31 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:31.051 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:31 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:00:31.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:31.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[51053]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 5.6 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:00:31.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:31.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:31.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:31 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:00:32.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:00:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": 
"auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:00:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:00:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:32 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:00:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:00:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:32 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:00:33.521 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-mon[46823]: Deploying daemon mgr.x on vm10 2026-03-09T00:00:33.522 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:33 vm04 
ceph-mon[46823]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:33.522 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:33 vm04 ceph-mon[51053]: Deploying daemon mgr.x on vm10 2026-03-09T00:00:33.522 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:33 vm04 ceph-mon[51053]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:33.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:33 vm10 ceph-mon[48982]: Deploying daemon mgr.x on vm10 2026-03-09T00:00:33.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:33 vm10 ceph-mon[48982]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:33.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:33.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:33.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:33.529Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:33.530Z caller=notify.go:724 component=dispatcher 
receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:33.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:36.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:36 vm10 ceph-mon[48982]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:36.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:36 vm04 ceph-mon[46823]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:36.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:36 vm04 ceph-mon[51053]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:37 vm10 ceph-mon[48982]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:37.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:37 vm04 ceph-mon[46823]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:37 vm04 ceph-mon[51053]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:39.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:00:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:00:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:39 vm10 ceph-mon[48982]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:39.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:00:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:39] "GET /metrics HTTP/1.1" 200 207670 "" "Prometheus/2.33.4" 2026-03-09T00:00:39.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:39 vm04 ceph-mon[46823]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:39.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:39 vm04 ceph-mon[51053]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:41 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:41 vm10 ceph-mon[48982]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB 
used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:41 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:41 vm04 ceph-mon[51053]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:41 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:41 vm04 ceph-mon[46823]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:43.522 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:43 vm04 ceph-mon[51053]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:43.522 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-mon[46823]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:43.643 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:43 vm10 ceph-mon[48982]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:43.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:43.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:43.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:43.529Z caller=dispatch.go:354 component=dispatcher 
msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:43.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:43.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:43.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:45.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:45 vm10 ceph-mon[48982]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:45 vm04 ceph-mon[51053]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:45 vm04 ceph-mon[46823]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:47.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:47 vm10 ceph-mon[48982]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:47 vm04 ceph-mon[51053]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:47.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:47 vm04 ceph-mon[46823]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:49.231 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:49 vm10 ceph-mon[48982]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:49.231 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:00:48 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:00:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:49 vm04 ceph-mon[51053]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:49.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:00:49 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:49] "GET /metrics HTTP/1.1" 200 207667 "" "Prometheus/2.33.4" 2026-03-09T00:00:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:49 vm04 ceph-mon[46823]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:51 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:51 vm10 ceph-mon[48982]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:51 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:51.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:51 vm04 ceph-mon[51053]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:51.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:51 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:00:51.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:51 vm04 ceph-mon[46823]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:53.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:53.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:53.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:53 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:00:53.529Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:53.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:00:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:00:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:00:53.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:00:53.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:53 vm04 ceph-mon[51053]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:53.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:53 vm04 ceph-mon[46823]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:54.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:53 vm10 ceph-mon[48982]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:55.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:54 vm10 ceph-mon[48982]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:55.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:54 vm04 ceph-mon[51053]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:54 vm04 ceph-mon[46823]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:00:57.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:56 vm10 ceph-mon[48982]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:56 vm04 ceph-mon[51053]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:56 vm04 ceph-mon[46823]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s 
rd, 0 op/s 2026-03-09T00:00:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:00:59 vm10 ceph-mon[48982]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:59.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:00:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:00:59.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:00:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:00:59] "GET /metrics HTTP/1.1" 200 207667 "" "Prometheus/2.33.4" 2026-03-09T00:00:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:00:59 vm04 ceph-mon[51053]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:00:59.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:00:59 vm04 ceph-mon[46823]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:01.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:01 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:01.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:01 vm10 ceph-mon[48982]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:01 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:01.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:01 vm04 ceph-mon[51053]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:01.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:01 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:01.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:01 vm04 ceph-mon[46823]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:03.524 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:03 vm04 ceph-mon[51053]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:03.524 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-mon[46823]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:03.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:03 vm10 ceph-mon[48982]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:03.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 
because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:03.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:03.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:03.530Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:03.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:03.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:06.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:05 vm10 ceph-mon[48982]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:06.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:05 vm04 ceph-mon[51053]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:06.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:05 vm04 ceph-mon[46823]: 
pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:06 vm10 ceph-mon[48982]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:07.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:06 vm04 ceph-mon[51053]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:07.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:06 vm04 ceph-mon[46823]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:09.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[50180]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:01:09.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:01:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:09] "GET /metrics HTTP/1.1" 200 207654 "" "Prometheus/2.33.4" 2026-03-09T00:01:09.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:09 vm04 ceph-mon[51053]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:09.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:09 vm04 ceph-mon[46823]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:09.936 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:09 vm10 ceph-mon[48982]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:09.936 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:09 vm10 systemd[1]: Stopping Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:01:10.259 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:09 vm10 bash[67684]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr.x" found: no such container 2026-03-09T00:01:10.259 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 podman[67692]: 2026-03-09 00:01:10.018572454 +0000 UTC m=+0.051687208 container died 4eb30174d7d9303967d6907206fbb20c6402c081efe59ea349a570c810359b71 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, release=754, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , GIT_CLEAN=True, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_REPO=https://github.com/ceph/ceph-container.git, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, GIT_BRANCH=HEAD, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=) 2026-03-09T00:01:10.259 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 podman[67692]: 2026-03-09 00:01:10.04068214 +0000 UTC m=+0.073796894 container remove 4eb30174d7d9303967d6907206fbb20c6402c081efe59ea349a570c810359b71 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, vendor=Red Hat, Inc., distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-type=git, io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD, architecture=x86_64, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , io.openshift.expose-services=, ceph=True, release=754, version=8, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-09T00:01:10.259 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 bash[67692]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x 2026-03-09T00:01:10.260 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-09T00:01:10.260 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 bash[67711]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr.x" found: no such container 2026-03-09T00:01:10.260 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service: Failed with result 'exit-code'. 2026-03-09T00:01:10.260 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 systemd[1]: Stopped Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:01:10.260 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service: Consumed 12.127s CPU time. 2026-03-09T00:01:10.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:10.068Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": dial tcp 192.168.123.110:8443: connect: connection refused" 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 systemd[1]: Starting Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 podman[67793]: 2026-03-09 00:01:10.359429692 +0000 UTC m=+0.017078421 container create 2d7d59a967f30e363c77f798c9f618a193560dc44e86c19ca25014c48203ee86 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 podman[67793]: 2026-03-09 00:01:10.390210075 +0000 UTC m=+0.047858804 container init 2d7d59a967f30e363c77f798c9f618a193560dc44e86c19ca25014c48203ee86 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 podman[67793]: 2026-03-09 00:01:10.393330243 +0000 UTC m=+0.050978972 container start 2d7d59a967f30e363c77f798c9f618a193560dc44e86c19ca25014c48203ee86 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 bash[67793]: 2d7d59a967f30e363c77f798c9f618a193560dc44e86c19ca25014c48203ee86 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 podman[67793]: 2026-03-09 00:01:10.352575847 +0000 UTC m=+0.010224576 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 
quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 systemd[1]: Started Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:01:10.511 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:10.508+0000 7fb1abe8e140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:01:10.775 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:10.775 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:10.775 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:10.776 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:10.776 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:01:10.776 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:01:10.776 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:10 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:01:10.776 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:10.558+0000 7fb1abe8e140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 
00:01:10 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[46823]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:01:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:10 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:01:11.508 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:11.119+0000 7fb1abe8e140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:01:11.508 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:11.507+0000 7fb1abe8e140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:01:11.761 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:01:11.761 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T00:01:11.761 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: from numpy import show_config as show_numpy_config 2026-03-09T00:01:11.761 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:11.624+0000 7fb1abe8e140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:01:11.761 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:11.668+0000 7fb1abe8e140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:01:11.761 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:11.759+0000 7fb1abe8e140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:01:12.669 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.378+0000 7fb1abe8e140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:01:12.669 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.503+0000 7fb1abe8e140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:01:12.669 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.544+0000 7fb1abe8e140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:01:12.669 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.581+0000 7fb1abe8e140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:01:12.669 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.626+0000 7fb1abe8e140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:01:13.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.668+0000 7fb1abe8e140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:01:13.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.846+0000 7fb1abe8e140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:01:13.077 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:12.900+0000 7fb1abe8e140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:01:13.097 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:12.761Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": dial tcp 192.168.123.110:8443: connect: connection refused" 2026-03-09T00:01:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:13 vm04 ceph-mon[51053]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T00:01:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:13 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:13 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-mon[46823]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:13.448 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:13 vm10 ceph-mon[48982]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:13.448 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:13 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:13.448 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:13 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:01:13.448 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.142+0000 7fb1abe8e140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:01:13.745 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.447+0000 7fb1abe8e140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:01:13.745 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.488+0000 7fb1abe8e140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:01:13.745 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.533+0000 7fb1abe8e140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:01:13.745 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.618+0000 7fb1abe8e140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:01:13.745 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.658+0000 7fb1abe8e140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:01:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:13.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": dial tcp 192.168.123.110:8443: connect: connection refused" 2026-03-09T00:01:13.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn 
ts=2026-03-09T00:01:13.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": dial tcp 192.168.123.110:8443: connect: connection refused" 2026-03-09T00:01:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:13.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:13.530Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": dial tcp 192.168.123.110:8443: connect: connection refused" 2026-03-09T00:01:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:13.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": dial tcp 192.168.123.110:8443: connect: connection refused" 2026-03-09T00:01:13.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:13.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:14.025 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.744+0000 7fb1abe8e140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:01:14.025 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:13.873+0000 7fb1abe8e140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:01:14.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:14 vm10 ceph-mon[48982]: Standby manager daemon x restarted 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:14 vm10 ceph-mon[48982]: Standby manager daemon x started 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:14 vm10 ceph-mon[48982]: from='mgr.? 
192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:14 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:14 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:14 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:14.025+0000 7fb1abe8e140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:14.063+0000 7fb1abe8e140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:01:14] ENGINE Bus STARTING 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: CherryPy Checker: 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: The Application mounted at '' has an empty config. 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:01:14] ENGINE Serving on http://:::9283 2026-03-09T00:01:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:01:14] ENGINE Bus STARTED 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[46823]: Standby manager daemon x restarted 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[46823]: Standby manager daemon x started 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[46823]: from='mgr.? 
192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[51053]: Standby manager daemon x restarted 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:01:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:14 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:01:14.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:14.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:15.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:15.005Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=3 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:15 vm04 ceph-mon[51053]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:15 vm04 ceph-mon[51053]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-09T00:01:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:15 vm04 ceph-mon[46823]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:15 vm04 ceph-mon[46823]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-09T00:01:15.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:15 vm10 ceph-mon[48982]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:15.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:15 vm10 ceph-mon[48982]: mgrmap e21: y(active, since 4m), standbys: x 2026-03-09T00:01:17.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:17 vm10 ceph-mon[48982]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB 
data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:17.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:17 vm04 ceph-mon[51053]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:17.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:17 vm04 ceph-mon[46823]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:01:19.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:19 vm10 
ceph-mon[48982]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:19.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:01:19.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:01:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:19] "GET /metrics HTTP/1.1" 200 207656 "" "Prometheus/2.33.4" 2026-03-09T00:01:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:19 vm04 ceph-mon[51053]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:19 vm04 ceph-mon[46823]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:21 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:21 vm10 ceph-mon[48982]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:21 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:21 vm04 ceph-mon[51053]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:21 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:21 vm04 ceph-mon[46823]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:23.526 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:23 vm04 ceph-mon[51053]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:23.526 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-mon[46823]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:23 vm10 ceph-mon[48982]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:23.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 
8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:23.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:23.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:23.530Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:23.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:23.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:25.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:25 vm10 ceph-mon[48982]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:25.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:25 vm04 ceph-mon[51053]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:25.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:25 vm04 ceph-mon[46823]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 
KiB/s rd, 1 op/s 2026-03-09T00:01:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:27 vm10 ceph-mon[48982]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:27 vm04 ceph-mon[51053]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:27 vm04 ceph-mon[46823]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:29.214 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:01:29.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:01:29 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:29] "GET /metrics HTTP/1.1" 200 207656 "" "Prometheus/2.33.4" 2026-03-09T00:01:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:29 vm04 ceph-mon[51053]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:29 vm04 ceph-mon[46823]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:29.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:29 vm10 ceph-mon[48982]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:31.518 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:31 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:31.518 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:31 vm10 ceph-mon[48982]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:31.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:31 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:31.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:31 vm04 ceph-mon[51053]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:31.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:31 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:31.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:31 vm04 ceph-mon[46823]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:33.529 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:33 vm04 ceph-mon[51053]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:33.529 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-mon[46823]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:33.577 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:33 vm10 ceph-mon[48982]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:33.528Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:33.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:33.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:33.531Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:33.532Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:33.532Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:35.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:35 vm10 ceph-mon[48982]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:35.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:35 vm04 ceph-mon[51053]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:35.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:35 vm04 ceph-mon[46823]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:37 vm10 ceph-mon[48982]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:37.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:37 vm04 ceph-mon[51053]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:37.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:37 vm04 ceph-mon[46823]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:39.235 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:01:39.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:01:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:39] "GET /metrics HTTP/1.1" 200 207657 "" "Prometheus/2.33.4" 2026-03-09T00:01:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:39 vm04 ceph-mon[51053]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:39.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:39 vm04 ceph-mon[46823]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:39.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:39 vm10 ceph-mon[48982]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:42.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:41 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:42.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:41 vm10 ceph-mon[48982]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:42.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:41 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:42.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:41 vm04 ceph-mon[46823]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:42.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:41 vm04 ceph-mon[51053]: from='client.24515 -' 
entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:42.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:41 vm04 ceph-mon[51053]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:43.529 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:43 vm04 ceph-mon[51053]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:43.529 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:43 vm04 ceph-mon[46823]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:43.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:43 vm10 ceph-mon[48982]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:43.529Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:43.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:43.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:43.532Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:43 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:43.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:43.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:45.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:45 vm10 ceph-mon[48982]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:45 vm04 ceph-mon[51053]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:45 vm04 ceph-mon[46823]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:47.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:47 vm10 ceph-mon[48982]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:47 vm04 ceph-mon[51053]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:47 vm04 ceph-mon[46823]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:49.201 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:48 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:01:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:49 vm04 ceph-mon[46823]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:49.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:01:49 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:49] "GET /metrics HTTP/1.1" 200 207658 "" "Prometheus/2.33.4" 2026-03-09T00:01:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:49 vm04 ceph-mon[51053]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:49.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:49 vm10 ceph-mon[48982]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:51 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-09T00:01:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:51 vm10 ceph-mon[48982]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:51 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:51 vm04 ceph-mon[51053]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:51 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:01:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:51 vm04 ceph-mon[46823]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:53.530 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:53 vm04 ceph-mon[51053]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:53.530 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:53 vm04 ceph-mon[46823]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:53.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:53 vm10 ceph-mon[48982]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:53.530Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:53.531Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:53.532Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 
00:01:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:01:53.533Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:53.534Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:01:53.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:01:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:01:53.534Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:01:55.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:55 vm10 ceph-mon[48982]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:55 vm04 ceph-mon[51053]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:55.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:55 vm04 ceph-mon[46823]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:01:57.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:57 vm10 ceph-mon[48982]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:57 vm04 ceph-mon[51053]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:57 vm04 ceph-mon[46823]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:59.226 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:01:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:01:59.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:01:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:01:59] "GET /metrics HTTP/1.1" 200 207658 "" "Prometheus/2.33.4" 2026-03-09T00:01:59.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:01:59 vm04 ceph-mon[51053]: pgmap v207: 161 
pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:01:59 vm04 ceph-mon[46823]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:01:59.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:01:59 vm10 ceph-mon[48982]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:01.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:01 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:01.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:01 vm10 ceph-mon[48982]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:01 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:01 vm04 ceph-mon[51053]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:01.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:01 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:01.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:01 vm04 ceph-mon[46823]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:03.531 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:03 vm04 ceph-mon[51053]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:03.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-mon[46823]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:03.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:03 vm10 ceph-mon[48982]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:03.531Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:03.532Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:03.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:03.534Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:03.535Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:03.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:03.535Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:05.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:05 vm10 ceph-mon[48982]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:05.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:05 vm04 ceph-mon[51053]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:05.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:05 vm04 ceph-mon[46823]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:07.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:07 vm10 ceph-mon[48982]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:07.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:07 vm04 ceph-mon[51053]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:07.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:07 vm04 
ceph-mon[46823]: pgmap v211: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:09.277 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:02:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:02:09.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:02:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:09] "GET /metrics HTTP/1.1" 200 207662 "" "Prometheus/2.33.4" 2026-03-09T00:02:09.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:09 vm04 ceph-mon[51053]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:09.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:09 vm04 ceph-mon[46823]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:09.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:09 vm10 ceph-mon[48982]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:11.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:11 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:11.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:11 vm10 ceph-mon[48982]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:11 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:11 vm04 ceph-mon[51053]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:11 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:11 vm04 ceph-mon[46823]: pgmap v213: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:12.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:12 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:02:12.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:12 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:02:12.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:12 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:02:12.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:12 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:02:12.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:12 vm04 ceph-mon[51053]: 
from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:02:12.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:12 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:02:12.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:12 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:02:12.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:12 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:02:12.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:12 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:02:13.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:13 vm10 ceph-mon[48982]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:13.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:13 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:02:13.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:13.531Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:13.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:13.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:13.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:13.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:13.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:13.534Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate 
certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:13.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:13.535Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:13.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:13.536Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:13.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:13 vm04 ceph-mon[51053]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:13.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:13 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:02:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-mon[46823]: pgmap v214: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:13 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' 2026-03-09T00:02:14.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:14 vm10 ceph-mon[48982]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:14.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:14 vm04 ceph-mon[51053]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:14.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:14 vm04 ceph-mon[46823]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:17.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:16 vm10 ceph-mon[48982]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:16 vm04 ceph-mon[51053]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:17.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:16 vm04 ceph-mon[46823]: pgmap v216: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 
2026-03-09T00:02:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:18.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:18 vm10 ceph-mon[48982]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:18 vm10 ceph-mon[48982]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[51053]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:18.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[51053]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:18.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:02:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[46823]: from='mgr.24298 192.168.123.104:0/948696746' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:18 vm04 ceph-mon[46823]: from='mgr.24298 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:02:19.204 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:02:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:02:19.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:02:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:19] "GET /metrics HTTP/1.1" 200 207663 "" "Prometheus/2.33.4" 2026-03-09T00:02:19.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:19 vm04 ceph-mon[51053]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:19 vm04 ceph-mon[46823]: pgmap v217: 161 pgs: 161 
active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:19 vm10 ceph-mon[48982]: pgmap v217: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:21 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:21 vm10 ceph-mon[48982]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:21 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:21 vm04 ceph-mon[46823]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:21 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:21 vm04 ceph-mon[51053]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:23.531 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:23 vm04 ceph-mon[51053]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:23.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-mon[46823]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:23 vm10 ceph-mon[48982]: pgmap v219: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:23.531Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:23.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:23.850 
INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:23.533Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:23.535Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:23.536Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:23.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:23 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:23.536Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:25.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:25 vm10 ceph-mon[48982]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:25.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:25 vm04 ceph-mon[46823]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:25.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:25 vm04 ceph-mon[51053]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:27 vm10 ceph-mon[48982]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:27.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:27 vm04 ceph-mon[51053]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:27.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:27 vm04 ceph-mon[46823]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:29.327 
INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:02:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:02:29.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:02:29 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:29] "GET /metrics HTTP/1.1" 200 207663 "" "Prometheus/2.33.4" 2026-03-09T00:02:29.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:29 vm10 ceph-mon[48982]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:29.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:29 vm04 ceph-mon[46823]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:29.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:29 vm04 ceph-mon[51053]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:30.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:30 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:30.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:30 vm10 ceph-mon[48982]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:30.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:30 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:30.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:30 vm04 ceph-mon[46823]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:30.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:30 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:30.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:30 vm04 ceph-mon[51053]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:33.530 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:33 vm10 ceph-mon[48982]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:33.532 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-mon[46823]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:33.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:33 vm04 ceph-mon[51053]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:33.532Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP 
SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:33.535Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:33.535Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:33.535Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:33.537Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:33.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:33 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:33.537Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:35.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:35 vm10 ceph-mon[48982]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:35.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:35 vm04 ceph-mon[51053]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:35.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:35 vm04 ceph-mon[46823]: pgmap v225: 161 pgs: 161 
active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:37 vm10 ceph-mon[48982]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:37 vm04 ceph-mon[51053]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:37 vm04 ceph-mon[46823]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:38.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:38 vm10 ceph-mon[48982]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:38.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:38 vm04 ceph-mon[46823]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:38.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:38 vm04 ceph-mon[51053]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:39.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:02:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:02:39.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:02:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:39] "GET /metrics HTTP/1.1" 200 207623 "" "Prometheus/2.33.4" 2026-03-09T00:02:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:40 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:41.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:40 vm10 ceph-mon[48982]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:40 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:40 vm04 ceph-mon[51053]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:40 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:40 vm04 ceph-mon[46823]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:43.535 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:43.534Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:43.535 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:43 vm04 ceph-mon[51053]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:43.535 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-mon[46823]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:43.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:43 vm10 ceph-mon[48982]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:43.536Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:43.536Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:43.536Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:43.537Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:43.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:43.538Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] 
msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:45.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:45 vm10 ceph-mon[48982]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:45 vm04 ceph-mon[51053]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:45 vm04 ceph-mon[46823]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:47.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:47 vm10 ceph-mon[48982]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:47.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:47 vm04 ceph-mon[46823]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:47 vm04 ceph-mon[51053]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:49.266 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:02:48 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:02:49.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:02:49 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:49] "GET /metrics HTTP/1.1" 200 207630 "" "Prometheus/2.33.4" 2026-03-09T00:02:49.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:49 vm04 ceph-mon[46823]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:49 vm04 ceph-mon[51053]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:49.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:49 vm10 ceph-mon[48982]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:51 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:51 vm10 ceph-mon[48982]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:51 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:51 vm04 ceph-mon[46823]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:51.601 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:51 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:02:51.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:51 vm04 ceph-mon[51053]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:53.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:53 vm10 ceph-mon[48982]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:53.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:53.535Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:53.611 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:53.537Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:53.611 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:02:53.537Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:53.611 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:53.537Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:53.611 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:53.538Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate 
certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:02:53.611 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:02:53.539Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:02:53.611 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:53 vm04 ceph-mon[51053]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:53.611 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:53 vm04 ceph-mon[46823]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:55.421 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:02:55.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:55 vm10 ceph-mon[48982]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:55.582 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:55 vm04 ceph-mon[51053]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:55.582 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:55 vm04 ceph-mon[46823]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:02:55.933 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (5m) 2m ago 6m 25.5M - ba2b418f427c 77a61b512c93 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (6m) 103s ago 6m 48.8M - 8.3.5 dad864ee21e9 a1c7daa645e2 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (5m) 2m ago 5m 49.2M - 3.5 e1d6a67b021e 5cbf21b64379 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283 running (105s) 103s ago 8m 52.6M - 19.2.3-678-ge911bdeb 654f31e6858e 2d7d59a967f3 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:9283 running (8m) 2m ago 8m 470M - 17.2.0 e1d6a67b021e 428d867911a5 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (8m) 2m ago 8m 50.5M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (8m) 103s ago 8m 40.6M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (8m) 2m ago 8m 39.4M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (6m) 2m ago 6m 17.6M - 1dbe0e931976 ff6d0adb33b7 2026-03-09T00:02:55.934 
INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (6m) 103s ago 6m 19.9M - 1dbe0e931976 c055c663d1e8 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (7m) 2m ago 7m 53.1M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (7m) 2m ago 7m 50.7M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (7m) 2m ago 7m 48.5M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (7m) 2m ago 7m 48.8M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (7m) 103s ago 7m 49.8M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (7m) 103s ago 7m 49.2M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (7m) 103s ago 7m 47.7M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (6m) 103s ago 6m 51.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (5m) 103s ago 6m 58.7M - 514e6a882f6e 60b4398433db 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (6m) 2m ago 6m 93.2M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:02:55.934 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (5m) 103s ago 5m 91.4M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:02:56.453 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T00:02:56.686 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:56 vm04 ceph-mon[46823]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:56.686 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:56 vm04 ceph-mon[46823]: from='client.24778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:02:56.687 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:56 vm04 ceph-mon[51053]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:56.687 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:56 vm04 ceph-mon[51053]: from='client.24778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:02:56.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:56 vm10 ceph-mon[48982]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:56.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:56 vm10 ceph-mon[48982]: from='client.24778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:02:57.226 
INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {}, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 14, 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:02:57.226 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:02:57.279 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s' 2026-03-09T00:02:57.478 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:57 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/123523453' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:02:57.478 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:57 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/123523453' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: cluster: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: id: fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: health: HEALTH_OK 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: services: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: mon: 3 daemons, quorum a,c,b (age 8m) 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: mgr: y(active, since 6m), standbys: x 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: osd: 8 osds: 8 up (since 6m), 8 in (since 6m) 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: rgw: 2 daemons active (2 hosts, 1 zones) 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: data: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: pools: 6 pools, 161 pgs 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: objects: 241 objects, 457 KiB 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: usage: 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: pgs: 161 active+clean 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: io: 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr 2026-03-09T00:02:57.827 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:02:57.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:57 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/123523453' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:02:57.878 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:02:58.808 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:58 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1838157974' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:02:58.808 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:58 vm04 ceph-mon[46823]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:58.808 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:58 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1838157974' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:02:58.808 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:58 vm04 ceph-mon[51053]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:58.809 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:02:58.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:58 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/1838157974' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:02:58.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:58 vm10 ceph-mon[48982]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:02:59.008 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 2'"'"'' 2026-03-09T00:02:59.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:02:59 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:59] "GET /metrics HTTP/1.1" 200 207630 "" "Prometheus/2.33.4" 2026-03-09T00:02:59.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:02:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:02:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:02:59.549 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:02:59.776 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:02:59 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/852957615' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:02:59.776 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:02:59 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/852957615' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:02:59.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:02:59 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/852957615' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:02:59.966 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail' 2026-03-09T00:03:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:00 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3948201297' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:03:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:00 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:00.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:00 vm10 ceph-mon[48982]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:00 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3948201297' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:03:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:00 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:00.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:00 vm04 ceph-mon[51053]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:00 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3948201297' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:03:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:00 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:00.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:00 vm04 ceph-mon[46823]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:01.815 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:01.657+0000 7f1d49775700 -1 mgr handle_mgr_map I was active but no longer am 2026-03-09T00:03:01.815 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setuser ceph since I am not root 2026-03-09T00:03:01.816 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ignoring --setgroup ceph since I am not root 2026-03-09T00:03:01.816 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:01 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3576446124' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:03:01.816 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:01 vm04 ceph-mon[46823]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T00:03:01.816 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:01 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3576446124' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:03:01.816 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:01 vm04 ceph-mon[51053]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T00:03:01.835 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T00:03:01.950 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:01 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3576446124' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:03:01.950 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:01 vm10 ceph-mon[48982]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T00:03:01.950 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:01 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:01] ENGINE Bus STOPPING 2026-03-09T00:03:02.067 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:01.828+0000 7f6c5ad0a000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:03:02.067 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:01 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:01.887+0000 7f6c5ad0a000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:03:02.292 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:01 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:01] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:03:02.292 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:01 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:01] ENGINE Bus STOPPED 2026-03-09T00:03:02.292 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:02] ENGINE Bus STARTING 2026-03-09T00:03:02.292 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:02] ENGINE Serving on http://:::9283 2026-03-09T00:03:02.292 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:02 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:02] ENGINE Bus STARTED 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:02 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:02.413+0000 7f6c5ad0a000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:03:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 
192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3576446124' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: mgrmap e22: x(active, starting, since 0.948471s) 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: Manager daemon x is now available 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: Queued rgw.foo for migration 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: Checking for cert/key for grafana.a 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": 
"mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/3576446124' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: mgrmap e22: x(active, starting, since 0.948471s) 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: Manager daemon x is now available 2026-03-09T00:03:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: Queued rgw.foo for migration 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: Checking for cert/key for grafana.a 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:02.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:02 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:03.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": 
"mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:03.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:03.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:03.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:03:03.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3576446124' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: mgrmap e22: x(active, starting, since 0.948471s) 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: Manager daemon x is now available 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: Queued rgw.foo for migration 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: Checking for cert/key for grafana.a 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:02 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:03.267 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:02 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:02.858+0000 
7f6c5ad0a000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:03:03.267 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:03.021+0000 7f6c5ad0a000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:03:03.267 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:03.081+0000 7f6c5ad0a000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:03:03.539 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:03.266+0000 7f6c5ad0a000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:03:03.666 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:03 vm10 ceph-mon[48982]: Deploying cephadm binary to vm04 2026-03-09T00:03:03.666 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:03 vm10 ceph-mon[48982]: Deploying cephadm binary to vm10 2026-03-09T00:03:03.666 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:03 vm10 ceph-mon[48982]: mgrmap e23: x(active, since 1.95361s) 2026-03-09T00:03:03.666 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:03 vm10 ceph-mon[48982]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[46823]: Deploying cephadm binary to vm04 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[46823]: Deploying cephadm binary to vm10 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[46823]: mgrmap e23: x(active, since 1.95361s) 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[46823]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[51053]: Deploying cephadm binary to vm04 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[51053]: Deploying cephadm binary to vm10 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[51053]: mgrmap e23: x(active, since 1.95361s) 2026-03-09T00:03:03.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:03 vm04 ceph-mon[51053]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:03:03.539Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:03:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:03.540Z caller=notify.go:724 
component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": dial tcp 192.168.123.104:8443: connect: connection refused" 2026-03-09T00:03:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:03:03.539Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:03:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:03.542Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": dial tcp 192.168.123.104:8443: connect: connection refused" 2026-03-09T00:03:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:03.542Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:03:03.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:03.545Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:03:04.330 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:03 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:03.993+0000 7f6c5ad0a000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:03:04.586 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:04.327+0000 7f6c5ad0a000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:03:04.586 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:04.412+0000 7f6c5ad0a000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:03:04.586 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:04.533+0000 7f6c5ad0a000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:03:04.587 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76310]: ceph : 
TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-09T00:03:04.587 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76310]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:04.587 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76310]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:04.587 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76360]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-09T00:03:04.587 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76360]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:04.587 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76360]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[46823]: [09/Mar/2026:00:03:03] ENGINE Bus STARTING 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[46823]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[46823]: [09/Mar/2026:00:03:03] ENGINE Serving on https://192.168.123.110:7150 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[46823]: [09/Mar/2026:00:03:03] ENGINE Client ('192.168.123.110', 54098) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[46823]: [09/Mar/2026:00:03:03] ENGINE Serving on http://192.168.123.110:8765 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[46823]: [09/Mar/2026:00:03:03] ENGINE Bus STARTED 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:04.645+0000 7f6c5ad0a000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:04.742+0000 7f6c5ad0a000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[51053]: [09/Mar/2026:00:03:03] ENGINE Bus STARTING 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[51053]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[51053]: [09/Mar/2026:00:03:03] ENGINE Serving on https://192.168.123.110:7150 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[51053]: [09/Mar/2026:00:03:03] ENGINE Client ('192.168.123.110', 54098) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[51053]: [09/Mar/2026:00:03:03] ENGINE Serving on http://192.168.123.110:8765 2026-03-09T00:03:04.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:04 vm04 ceph-mon[51053]: 
[09/Mar/2026:00:03:03] ENGINE Bus STARTED 2026-03-09T00:03:04.915 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:04 vm10 ceph-mon[48982]: [09/Mar/2026:00:03:03] ENGINE Bus STARTING 2026-03-09T00:03:04.915 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:04 vm10 ceph-mon[48982]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:04.915 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:04 vm10 ceph-mon[48982]: [09/Mar/2026:00:03:03] ENGINE Serving on https://192.168.123.110:7150 2026-03-09T00:03:04.915 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:04 vm10 ceph-mon[48982]: [09/Mar/2026:00:03:03] ENGINE Client ('192.168.123.110', 54098) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:03:04.915 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:04 vm10 ceph-mon[48982]: [09/Mar/2026:00:03:03] ENGINE Serving on http://192.168.123.110:8765 2026-03-09T00:03:04.915 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:04 vm10 ceph-mon[48982]: [09/Mar/2026:00:03:03] ENGINE Bus STARTED 2026-03-09T00:03:05.161 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76425]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-09T00:03:05.162 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76425]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:05.162 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:03:04 vm04 sudo[76425]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:05.162 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:03:05 vm04 sudo[76478]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdb 2026-03-09T00:03:05.412 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:05.312+0000 7f6c5ad0a000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:03:05.412 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:03:05 vm04 sudo[76478]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:05.412 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:03:05 vm04 sudo[76478]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:05.709 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69182]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-09T00:03:05.710 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69182]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:05.710 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69182]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:05.710 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69199]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-09T00:03:05.710 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69199]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:05.710 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69199]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:05.710 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: mgrmap e24: x(active, since 3s) 2026-03-09T00:03:05.710 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.710 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.710 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:05.411+0000 7f6c5ad0a000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: mgrmap e24: x(active, since 3s) 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:05.711 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: mgrmap e24: x(active, since 3s) 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 
ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:05.982 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:05.982 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69202]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-09T00:03:05.982 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69202]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:05.982 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69202]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:06.327 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69205]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdb 2026-03-09T00:03:06.328 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69205]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:06.328 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:03:05 vm10 sudo[69205]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:06.418 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.114+0000 7f6c5ad0a000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.182+0000 7f6c5ad0a000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.241+0000 7f6c5ad0a000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.357+0000 7f6c5ad0a000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 sudo[76698]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 sudo[76698]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 sudo[76698]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 sudo[76692]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 sudo[76692]: pam_unix(sudo:session): session 
opened for user root by (uid=0) 2026-03-09T00:03:06.419 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 sudo[76692]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:06.712 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.417+0000 7f6c5ad0a000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:03:06.712 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.519+0000 7f6c5ad0a000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:03:06.712 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.625+0000 7f6c5ad0a000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='admin socket' entity='admin 
socket' cmd=smart args=[json]: finished 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:06.991 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:06.992 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:06.992 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 sudo[69208]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 sudo[69208]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 sudo[69208]: pam_unix(sudo:session): session closed for user root 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' 
cmd='smart' args=[json]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:03:07.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:06.990+0000 7f6c5ad0a000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:07.069+0000 7f6c5ad0a000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [09/Mar/2026:00:03:07] ENGINE Bus STARTING 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: CherryPy Checker: 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: The Application mounted at '' has an empty config. 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [09/Mar/2026:00:03:07] ENGINE Serving on http://:::9283 2026-03-09T00:03:07.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: [09/Mar/2026:00:03:07] ENGINE Bus STARTED 2026-03-09T00:03:07.662 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:07.395Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=5 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:03:07.955 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:07 vm04 systemd[1]: Stopping Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Standby manager daemon y started 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.? 
192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.955 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Standby manager daemon y started 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 
vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:07.956 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:07 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 
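(Annotation: the "config generate-minimal-conf" dispatches and the "Updating vm04:/etc/ceph/ceph.conf" / keyring messages above are cephadm redistributing the client config after the mgr failover. A minimal spot-check of that distribution, assuming shell access on one of the hosts with the client.admin keyring already present:

    # Sketch, assuming /etc/ceph/ceph.conf is the copy cephadm just pushed:
    # regenerate the minimal conf the mons would hand out now and compare.
    ceph config generate-minimal-conf > /tmp/minimal.conf
    diff -u /tmp/minimal.conf /etc/ceph/ceph.conf && echo "minimal conf in sync"
)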
2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Standby manager daemon y started 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/646001663' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:07 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:07 vm04 podman[77513]: 2026-03-09 00:03:07.954534852 +0000 UTC m=+0.018268425 container died ff6d0adb33b76901daedc9dc18647e2a27f7e667d5ba9c47e9219040edd50abe (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:07 vm04 podman[77513]: 2026-03-09 00:03:07.977889551 +0000 UTC m=+0.041623135 container remove 
ff6d0adb33b76901daedc9dc18647e2a27f7e667d5ba9c47e9219040edd50abe (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:07 vm04 bash[77513]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:07 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:08 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:08 vm04 systemd[1]: Stopped Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:08.213 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:08 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service: Consumed 1.381s CPU time. 2026-03-09T00:03:08.558 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:08 vm04 systemd[1]: Starting Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:08.558 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:08 vm04 bash[77619]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-09T00:03:08.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[51053]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-09T00:03:08.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[51053]: Deploying daemon node-exporter.a on vm04 2026-03-09T00:03:08.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[51053]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:08.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[51053]: mgrmap e25: x(active, since 7s), standbys: y 2026-03-09T00:03:08.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:03:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[46823]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-09T00:03:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[46823]: Deploying daemon node-exporter.a on vm04 2026-03-09T00:03:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[46823]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[46823]: mgrmap e25: x(active, since 7s), standbys: y 2026-03-09T00:03:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:08 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:03:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:08 vm10 ceph-mon[48982]: Reconfiguring node-exporter.a (dependencies changed)... 
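(Annotation: the recurring Alertmanager webhook failures in this run report "x509: cannot validate certificate ... because it doesn't contain any IP SANs", i.e. the dashboard certificate served on port 8443 apparently carries no IP Subject Alternative Names, so posts to https://192.168.123.104:8443 and https://192.168.123.110:8443 fail verification. A hedged way to inspect the offending certificate, assuming openssl is available and the dashboard is currently listening:

    # Sketch: dump the SAN section of the certificate Alertmanager is rejecting.
    echo | openssl s_client -connect 192.168.123.110:8443 2>/dev/null \
        | openssl x509 -noout -text | grep -A1 'Subject Alternative Name'
)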
2026-03-09T00:03:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:08 vm10 ceph-mon[48982]: Deploying daemon node-exporter.a on vm04 2026-03-09T00:03:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:08 vm10 ceph-mon[48982]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 84 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:03:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:08 vm10 ceph-mon[48982]: mgrmap e25: x(active, since 7s), standbys: y 2026-03-09T00:03:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:08 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:03:09.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:08] "GET /metrics HTTP/1.1" 200 34539 "" "Prometheus/2.33.4" 2026-03-09T00:03:09.305 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:09] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:03:09.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:09.305Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:03:10.100 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:09 vm04 bash[77619]: Getting image source signatures 2026-03-09T00:03:10.100 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:09 vm04 bash[77619]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-09T00:03:10.100 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:09 vm04 bash[77619]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-09T00:03:10.100 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:09 vm04 bash[77619]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-09T00:03:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:10 vm10 ceph-mon[48982]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:10 vm10 ceph-mon[48982]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 38 KiB/s wr, 16 op/s 2026-03-09T00:03:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:10 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:10 vm04 ceph-mon[51053]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:10 vm04 ceph-mon[51053]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 38 KiB/s wr, 16 op/s 2026-03-09T00:03:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:10 vm04 ceph-mon[51053]: 
from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-mon[46823]: from='client.24515 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-mon[46823]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 38 KiB/s wr, 16 op/s 2026-03-09T00:03:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 bash[77619]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 bash[77619]: Writing manifest to image destination 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 podman[77619]: 2026-03-09 00:03:10.675906501 +0000 UTC m=+2.362340623 container create 38e0af6b2fbf68d04000e9cd6e5871604de837a347722e4c9ec49bffb96b81a9 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 podman[77619]: 2026-03-09 00:03:10.70477729 +0000 UTC m=+2.391211422 container init 38e0af6b2fbf68d04000e9cd6e5871604de837a347722e4c9ec49bffb96b81a9 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 podman[77619]: 2026-03-09 00:03:10.707308105 +0000 UTC m=+2.393742227 container start 38e0af6b2fbf68d04000e9cd6e5871604de837a347722e4c9ec49bffb96b81a9 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 bash[77619]: 38e0af6b2fbf68d04000e9cd6e5871604de837a347722e4c9ec49bffb96b81a9 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 podman[77619]: 2026-03-09 00:03:10.669813363 +0000 UTC m=+2.356247495 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 systemd[1]: Started Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.715Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.715Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=arp 2026-03-09T00:03:11.101 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info 
collector=btrfs 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=edac 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=os 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info 
collector=sockstat 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=stat 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=time 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=uname 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-09T00:03:11.102 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.716Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-09T00:03:11.103 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.717Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-09T00:03:11.103 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:03:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a[77680]: ts=2026-03-09T00:03:10.717Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-09T00:03:12.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:11 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:12.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:11 vm10 ceph-mon[48982]: Reconfiguring alertmanager.a (dependencies changed)... 
2026-03-09T00:03:12.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:11 vm10 ceph-mon[48982]: Deploying daemon alertmanager.a on vm04 2026-03-09T00:03:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:11 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:11 vm04 ceph-mon[51053]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T00:03:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:11 vm04 ceph-mon[51053]: Deploying daemon alertmanager.a on vm04 2026-03-09T00:03:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:11 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:11 vm04 ceph-mon[46823]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T00:03:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:11 vm04 ceph-mon[46823]: Deploying daemon alertmanager.a on vm04 2026-03-09T00:03:13.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:13 vm04 ceph-mon[51053]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 29 KiB/s wr, 12 op/s 2026-03-09T00:03:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:13 vm04 ceph-mon[46823]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 29 KiB/s wr, 12 op/s 2026-03-09T00:03:13.601 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:03:13.540Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:03:13.601 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=error ts=2026-03-09T00:03:13.540Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:03:13.601 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:13.542Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:03:13.601 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:13.542Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:03:13.601 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:13.543Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.104:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.104 because it doesn't contain any IP SANs" 2026-03-09T00:03:13.601 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=warn ts=2026-03-09T00:03:13.543Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.110:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.110 because it doesn't contain any IP SANs" 2026-03-09T00:03:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:13 vm10 ceph-mon[48982]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 29 KiB/s wr, 12 op/s 2026-03-09T00:03:13.976 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:13 vm04 systemd[1]: Stopping Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:14.239 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[71796]: level=info ts=2026-03-09T00:03:14.052Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-09T00:03:14.239 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[77948]: 2026-03-09 00:03:14.063435099 +0000 UTC m=+0.027076346 container died 77a61b512c93e705733ba9dc92af6207681afabdae4d63dee204546bcb635ab7 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:14.239 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[77948]: 2026-03-09 00:03:14.178805945 +0000 UTC m=+0.142447182 container remove 77a61b512c93e705733ba9dc92af6207681afabdae4d63dee204546bcb635ab7 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:14.239 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[77948]: 2026-03-09 00:03:14.18064276 +0000 UTC m=+0.144284007 volume remove 3456f770698786c6551bea6918a8bdb294854f0d4690afa6a6d170bda388a7b3 2026-03-09T00:03:14.239 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 bash[77948]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a 2026-03-09T00:03:14.516 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@alertmanager.a.service: Deactivated successfully. 
2026-03-09T00:03:14.516 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 systemd[1]: Stopped Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:14.516 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@alertmanager.a.service: Consumed 1.157s CPU time. 2026-03-09T00:03:14.516 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 systemd[1]: Starting Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:14.785 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:14 vm04 ceph-mon[51053]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 24 KiB/s wr, 10 op/s 2026-03-09T00:03:14.787 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-mon[46823]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 24 KiB/s wr, 10 op/s 2026-03-09T00:03:14.787 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[78053]: 2026-03-09 00:03:14.534801396 +0000 UTC m=+0.019084611 volume create de656a11509b9d5d091140184efa8f0f771ed30a8c2c691c77d0569313c1fc73 2026-03-09T00:03:14.787 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[78053]: 2026-03-09 00:03:14.537354511 +0000 UTC m=+0.021637736 container create bcac0140b0f61e75c97dfcae8b262e1d44947399e56734f96288fcd04ed24163 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:14.787 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[78053]: 2026-03-09 00:03:14.564468918 +0000 UTC m=+0.048752152 container init bcac0140b0f61e75c97dfcae8b262e1d44947399e56734f96288fcd04ed24163 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:14.787 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[78053]: 2026-03-09 00:03:14.566981998 +0000 UTC m=+0.051265223 container start bcac0140b0f61e75c97dfcae8b262e1d44947399e56734f96288fcd04ed24163 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:14.787 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 bash[78053]: bcac0140b0f61e75c97dfcae8b262e1d44947399e56734f96288fcd04ed24163 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 podman[78053]: 2026-03-09 00:03:14.528903236 +0000 UTC m=+0.013186471 image pull c8568f914cd25b2062c44e9f79f9c18da6e3b85fe0c47a12a2191c61426c2b19 quay.io/prometheus/alertmanager:v0.25.0 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 systemd[1]: Started Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.584Z caller=main.go:240 level=info msg="Starting Alertmanager" version="(version=0.25.0, branch=HEAD, revision=258fab7cdd551f2cf251ed0348f0ad7289aee789)" 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.584Z caller=main.go:241 level=info build_context="(go=go1.19.4, user=root@abe866dd5717, date=20221222-14:51:36)" 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.586Z caller=cluster.go:185 level=info component=cluster msg="setting advertise address explicitly" addr=192.168.123.104 port=9094 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.590Z caller=cluster.go:681 level=info component=cluster msg="Waiting for gossip to settle..." interval=2s 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.611Z caller=coordinator.go:113 level=info component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.611Z caller=coordinator.go:126 level=info component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.613Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9093 2026-03-09T00:03:14.788 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:14.613Z caller=tls_config.go:235 level=info msg="TLS is disabled." http2=false address=[::]:9093 2026-03-09T00:03:14.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:14 vm10 ceph-mon[48982]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 24 KiB/s wr, 10 op/s 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:03:15.114+0000 7fb1697c6640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Creating ceph-iscsi config... 
2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:03:15.418 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Traceback (most recent call last): 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: return _run_code(code, main_globals, None, 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: exec(code, run_globals) 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Traceback (most recent call last): 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: return self.wait_async( 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: return future.result(timeout) 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: return self.__get_result() 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: raise self._exception 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: out, err, code = await self._run_cephadm( 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: raise OrchestratorError( 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Creating ceph-iscsi config... 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Traceback (most recent call last): 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: return _run_code(code, main_globals, None, 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: exec(code, run_globals) 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.419 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:15.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:15.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:15.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:03:15.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:15.684 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Creating ceph-iscsi config... 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: exec(code, run_globals) 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.wait_async( 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: return future.result(timeout) 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.__get_result() 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: raise self._exception 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: raise OrchestratorError( 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Creating ceph-iscsi config... 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: exec(code, run_globals) 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:03:15.685 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T00:03:15.686 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:15 vm10 ceph-mon[48982]: Deploying daemon node-exporter.b on vm10 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 systemd[1]: Stopping Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 podman[69996]: 2026-03-09 00:03:15.519156321 +0000 UTC m=+0.032039468 container died c055c663d1e800ade894d290e0fac67be676413cf267a40693df5ff526971e1b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 podman[69996]: 2026-03-09 00:03:15.544584005 +0000 UTC m=+0.057467152 container remove c055c663d1e800ade894d290e0fac67be676413cf267a40693df5ff526971e1b (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 bash[69996]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 systemd[1]: Stopped Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:15.686 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service: Consumed 1.401s CPU time. 2026-03-09T00:03:16.074 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 systemd[1]: Starting Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:16.074 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:15 vm10 bash[70109]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last):
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: return _run_code(code, main_globals, None,
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: exec(code, run_globals)
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code.
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details.
2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.wait_async( 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: return future.result(timeout) 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.__get_result() 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:03:16.101 INFO:journalctl@ceph.mon.a.vm04.stdout: raise self._exception 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: raise OrchestratorError( 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last):
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: return _run_code(code, main_globals, None,
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: exec(code, run_globals)
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code.
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details.
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: Reconfiguring node-exporter.b (dependencies changed)...
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[46823]: Deploying daemon node-exporter.b on vm10
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x'
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x'
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)...
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last):
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: return _run_code(code, main_globals, None,
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: exec(code, run_globals)
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code.
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details.
2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:03:16.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.wait_async( 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: return future.result(timeout) 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.__get_result() 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: raise self._exception 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: raise OrchestratorError( 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last):
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: return _run_code(code, main_globals, None,
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: exec(code, run_globals)
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code.
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details.
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: Reconfiguring node-exporter.b (dependencies changed)...
2026-03-09T00:03:16.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:15 vm04 ceph-mon[51053]: Deploying daemon node-exporter.b on vm10 2026-03-09T00:03:16.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:16.591Z caller=cluster.go:706 level=info component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000664033s 2026-03-09T00:03:16.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:16 vm04 ceph-mon[46823]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 9 op/s 2026-03-09T00:03:16.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:16 vm04 ceph-mon[51053]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 9 op/s 2026-03-09T00:03:16.923 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:16 vm10 ceph-mon[48982]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 9 op/s 2026-03-09T00:03:17.491 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:17 vm10 bash[70109]: Getting image source signatures 2026-03-09T00:03:17.491 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:17 vm10 bash[70109]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-09T00:03:17.491 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:17 vm10 bash[70109]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-09T00:03:17.492 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:17 vm10 bash[70109]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-09T00:03:17.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:17 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:18.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:17 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:18.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:17 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:18.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-mon[48982]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 9 op/s 2026-03-09T00:03:18.828 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 bash[70109]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-09T00:03:18.828 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 bash[70109]: Writing manifest to image destination 2026-03-09T00:03:18.828 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 podman[70109]: 2026-03-09 00:03:18.716741135 +0000 UTC m=+2.860758369 container create d059c0022310421951c62bc3da32e0426d718d58f39e255eaade8d1ff44ff34f (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:03:18.828 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 
00:03:18 vm10 podman[70109]: 2026-03-09 00:03:18.684863 +0000 UTC m=+2.828880245 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-09T00:03:19.035 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:18 vm04 ceph-mon[46823]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 9 op/s 2026-03-09T00:03:19.035 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:18 vm04 ceph-mon[51053]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 9 op/s 2026-03-09T00:03:19.090 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:18] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.33.4" 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 podman[70109]: 2026-03-09 00:03:18.833901895 +0000 UTC m=+2.977919140 container init d059c0022310421951c62bc3da32e0426d718d58f39e255eaade8d1ff44ff34f (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 podman[70109]: 2026-03-09 00:03:18.837343796 +0000 UTC m=+2.981361041 container start d059c0022310421951c62bc3da32e0426d718d58f39e255eaade8d1ff44ff34f (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.850Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.850Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.851Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.851Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.851Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-09T00:03:19.091 
INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.851Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=arp 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=edac 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 
level=info collector=nvme 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=os 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=stat 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=time 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 
vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=uname 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.852Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.853Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-09T00:03:19.091 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b[70167]: ts=2026-03-09T00:03:18.853Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-09T00:03:19.092 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 bash[70109]: d059c0022310421951c62bc3da32e0426d718d58f39e255eaade8d1ff44ff34f 2026-03-09T00:03:19.092 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:03:18 vm10 systemd[1]: Started Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:19.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:03:19 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[47033]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:19] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T00:03:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:20 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:20 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:20 vm10 ceph-mon[48982]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T00:03:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:20 vm10 ceph-mon[48982]: Deploying daemon prometheus.a on vm10 2026-03-09T00:03:20.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[46823]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[46823]: Deploying daemon prometheus.a on vm10 2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[51053]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-09T00:03:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:20 vm04 ceph-mon[51053]: Deploying daemon prometheus.a on vm10 2026-03-09T00:03:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:21 vm10 ceph-mon[48982]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 10 op/s 2026-03-09T00:03:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:21 vm04 ceph-mon[51053]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 10 op/s 2026-03-09T00:03:21.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:21 vm04 ceph-mon[46823]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 22 KiB/s wr, 10 op/s 2026-03-09T00:03:22.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:22.314Z caller=manager.go:609 level=warn component="rule manager" group=pools msg="Evaluating rule failed" rule="alert: CephPoolGrowthWarning\nexpr: (predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right()\n ceph_pool_metadata) >= 95\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.9.2\n severity: warning\n type: ceph_default\nannotations:\n description: |\n Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.\n summary: Pool growth rate may soon exceed it's capacity\n" err="found duplicate series for the match group {pool_id=\"1\"} on the left hand-side of the operation: [{instance=\"192.168.123.110:9283\", job=\"ceph\", pool_id=\"1\"}, {instance=\"192.168.123.104:9283\", job=\"ceph\", pool_id=\"1\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:23 vm10 ceph-mon[48982]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:03:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:23 vm04 ceph-mon[51053]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:03:23.614 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:23 vm04 ceph-mon[46823]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:03:24.850 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:03:24 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:03:24.594Z caller=cluster.go:698 level=info component=cluster msg="gossip settled; proceeding" elapsed=10.003256008s 2026-03-09T00:03:26.065 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:25 vm10 ceph-mon[48982]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:03:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:25 vm04 ceph-mon[46823]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:03:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:25 vm04 ceph-mon[51053]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:03:26.320 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 systemd[1]: Stopping Ceph prometheus.a for 
fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:26.320 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.160Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T00:03:26.320 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.160Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-09T00:03:26.320 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.160Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.160Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.160Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.160Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.161Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.161Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.161Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.199Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.199Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[65912]: ts=2026-03-09T00:03:26.199Z caller=main.go:1066 level=info msg="See you next time!" 
2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 podman[70528]: 2026-03-09 00:03:26.20324666 +0000 UTC m=+0.064528478 container died 60b4398433db55f3f63cc439bd8d81cb927296c1d68ccf45c47864b2281a6b2d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 podman[70528]: 2026-03-09 00:03:26.262269423 +0000 UTC m=+0.123551250 container remove 60b4398433db55f3f63cc439bd8d81cb927296c1d68ccf45c47864b2281a6b2d (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:26.321 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 bash[70528]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a 2026-03-09T00:03:26.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service: Deactivated successfully. 2026-03-09T00:03:26.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 systemd[1]: Stopped Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:26.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service: Consumed 1.107s CPU time. 2026-03-09T00:03:26.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 systemd[1]: Starting Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:03:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:26 vm10 ceph-mon[48982]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T00:03:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:26 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2808217562' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:03:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:26 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/2039154124' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/46665440"}]: dispatch 2026-03-09T00:03:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:26 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 podman[70637]: 2026-03-09 00:03:26.650826224 +0000 UTC m=+0.038891381 container create 98db0255a2819c6fb43a95c5a414635aea14993dd545d7ffbf3b76c344476396 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 podman[70637]: 2026-03-09 00:03:26.695643905 +0000 UTC m=+0.083709051 container init 98db0255a2819c6fb43a95c5a414635aea14993dd545d7ffbf3b76c344476396 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 podman[70637]: 2026-03-09 00:03:26.698728418 +0000 UTC m=+0.086793564 container start 98db0255a2819c6fb43a95c5a414635aea14993dd545d7ffbf3b76c344476396 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 bash[70637]: 98db0255a2819c6fb43a95c5a414635aea14993dd545d7ffbf3b76c344476396 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 podman[70637]: 2026-03-09 00:03:26.623174818 +0000 UTC m=+0.011239964 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 systemd[1]: Started Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.724Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.725Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.725Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm10 (none))" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.725Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.725Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.727Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.728Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.731Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.732Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.732Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.732Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=913.279µs 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.732Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.745Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=2 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.750Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=2 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.751Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=2 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.751Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=20.708µs wal_replay_duration=19.454045ms wbl_replay_duration=130ns total_replay_duration=20.409583ms 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.755Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.755Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.755Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T00:03:27.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.768Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=12.499641ms db_storage=1.042µs remote_storage=1.383µs web_handler=681ns query_engine=1.293µs scrape=817.68µs scrape_sd=88.696µs notify=9.638µs notify_sd=7.234µs rules=11.261866ms tracing=4.798µs 2026-03-09T00:03:27.079 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.768Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 
2026-03-09T00:03:27.079 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:26.768Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[46823]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2808217562' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2039154124' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/46665440"}]: dispatch 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[51053]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2808217562' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2039154124' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/46665440"}]: dispatch 2026-03-09T00:03:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:26 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:27.828 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 systemd[1]: Stopping Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:03:27.828 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-09T00:03:27+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-09T00:03:27.828 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[64174]: t=2026-03-09T00:03:27+0000 lvl=info msg="Database locked, sleeping then retrying" logger=sqlstore error="database is locked" retry=0 2026-03-09T00:03:27.828 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 podman[70777]: 2026-03-09 00:03:27.681685868 +0000 UTC m=+0.065554988 container died a1c7daa645e2616c6a6b16da5b2253f802094d5f2aec4e8c942fe23d59861c31 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, name=ubi8, io.buildah.version=1.24.2, architecture=x86_64, io.openshift.expose-services=, description=Ceph Grafana Container, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.component=ubi8-container, summary=Grafana Container configured for Ceph mgr/dashboard integration, vendor=Red Hat, Inc., release=236.1648460182, io.openshift.tags=base rhel8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, version=8.5, build-date=2022-03-28T10:36:18.413762, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, maintainer=Paul Cuzner , distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-09T00:03:28.159 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: Reconfiguring grafana.a (dependencies changed)... 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2039154124' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/46665440"}]': finished 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: Reconfiguring daemon grafana.a on vm10 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1043650022' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]: dispatch 2026-03-09T00:03:28.160 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:27 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]: dispatch 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 podman[70777]: 2026-03-09 00:03:27.908002639 +0000 UTC m=+0.291871750 container remove a1c7daa645e2616c6a6b16da5b2253f802094d5f2aec4e8c942fe23d59861c31 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, release=236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel8, architecture=x86_64, name=ubi8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.component=ubi8-container, maintainer=Paul Cuzner , vcs-type=git, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, description=Ceph Grafana Container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, build-date=2022-03-28T10:36:18.413762, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8.5, distribution-scope=public, io.buildah.version=1.24.2, io.k8s.display-name=Red Hat Universal Base Image 8, vendor=Red Hat, Inc., io.openshift.expose-services=, summary=Grafana Container configured for Ceph mgr/dashboard integration) 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 bash[70777]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 bash[70796]: Error: no container with name or ID "ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana.a" found: no such container 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service: Deactivated successfully. 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 systemd[1]: Stopped Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service: Consumed 1.666s CPU time. 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:27 vm10 systemd[1]: Starting Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 podman[70836]: 2026-03-09 00:03:28.070984789 +0000 UTC m=+0.021916345 container create 9fb25843918bddd8da0697768c2a38bac3ffe45e5b4ab57da26320250e1a5465 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, summary=Grafana Container configured for Ceph mgr/dashboard integration, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.component=ubi8-container, maintainer=Paul Cuzner , vcs-type=git, vendor=Red Hat, Inc., version=8.5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, release=236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, description=Ceph Grafana Container, io.openshift.expose-services=, io.openshift.tags=base rhel8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., distribution-scope=public, name=ubi8, io.buildah.version=1.24.2, build-date=2022-03-28T10:36:18.413762) 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 podman[70836]: 2026-03-09 00:03:28.128919986 +0000 UTC m=+0.079851553 container init 9fb25843918bddd8da0697768c2a38bac3ffe45e5b4ab57da26320250e1a5465 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vcs-type=git, architecture=x86_64, maintainer=Paul Cuzner , com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, com.redhat.component=ubi8-container, version=8.5, io.openshift.tags=base rhel8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.24.2, build-date=2022-03-28T10:36:18.413762, name=ubi8, vendor=Red Hat, Inc., description=Ceph Grafana Container, io.openshift.expose-services=, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, distribution-scope=public) 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 podman[70836]: 2026-03-09 00:03:28.131975564 +0000 UTC m=+0.082907131 container start 9fb25843918bddd8da0697768c2a38bac3ffe45e5b4ab57da26320250e1a5465 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, distribution-scope=public, name=ubi8, maintainer=Paul Cuzner , io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 8, build-date=2022-03-28T10:36:18.413762, com.redhat.component=ubi8-container, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=Ceph Grafana Container, io.openshift.tags=base rhel8, io.buildah.version=1.24.2, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-type=git, vendor=Red Hat, Inc., version=8.5, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.expose-services=, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56) 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 bash[70836]: 9fb25843918bddd8da0697768c2a38bac3ffe45e5b4ab57da26320250e1a5465 2026-03-09T00:03:28.160 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 podman[70836]: 2026-03-09 00:03:28.059847626 +0000 UTC m=+0.010779193 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-09T00:03:28.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: Reconfiguring grafana.a (dependencies changed)... 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2039154124' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/46665440"}]': finished 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: Reconfiguring daemon grafana.a on vm10 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1043650022' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[46823]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: Reconfiguring grafana.a (dependencies changed)... 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2039154124' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/46665440"}]': finished 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: Reconfiguring daemon grafana.a on vm10 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1043650022' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]: dispatch 2026-03-09T00:03:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:27 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]: dispatch 2026-03-09T00:03:28.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE Bus STOPPING 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
logger=settings 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 systemd[1]: Started Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-09T00:03:28.578 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="App mode production" logger=settings 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Connecting to DB" logger=sqlstore 
dbtype=sqlite3 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="migrations completed" logger=migrator performed=0 skipped=377 duration=294.202µs 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="deleted datasource based on configuration" logger=provisioning.datasources name=Dashboard1 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Loki uid=P8E80F9AEF21F6940 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 
00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-09T00:03:28.579 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:03:28+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-09T00:03:29.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:03:29.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE Bus STOPPED 2026-03-09T00:03:29.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE Bus STARTING 2026-03-09T00:03:29.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE Serving on http://:::9283 2026-03-09T00:03:29.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE Bus STARTED 2026-03-09T00:03:29.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:28] ENGINE Bus STOPPING 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 409 B/s rd, 0 op/s 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]': finished 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: osdmap e82: 8 total, 8 up, 8 in 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/172912345' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/961449681"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mon.? 
-' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 409 B/s rd, 0 op/s 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]': finished 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: osdmap e82: 8 total, 8 up, 8 in 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/172912345' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/961449681"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:03:29.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:03:29.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:03:29.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T00:03:29.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T00:03:29.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:29 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 409 B/s rd, 0 op/s 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/46665440"}]': finished 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: osdmap e82: 8 total, 8 up, 8 in 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/172912345' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/961449681"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm04.local:9093"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:29 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STOPPED 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STARTING 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Serving on http://:::9283 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STARTED 2026-03-09T00:03:29.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STOPPING 2026-03-09T00:03:30.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:03:30.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STOPPED 2026-03-09T00:03:30.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STARTING 2026-03-09T00:03:30.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Serving on http://:::9283 2026-03-09T00:03:30.328 
INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:03:29] ENGINE Bus STARTED 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/172912345' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/961449681"}]': finished 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: osdmap e83: 8 total, 8 up, 8 in 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2822406549' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]: dispatch 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]: dispatch 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:30.670 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/172912345' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/961449681"}]': finished 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: osdmap e83: 8 total, 8 up, 8 in 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2822406549' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:30.671 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:30 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/172912345' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/961449681"}]': finished 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: osdmap e83: 8 total, 8 up, 8 in 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2822406549' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:03:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:30 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]': finished 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: osdmap e84: 8 total, 8 up, 8 in 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]': finished 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: osdmap e84: 8 total, 8 up, 8 in 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:03:31.700 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:31 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1907326865"}]': finished 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: osdmap e84: 8 total, 8 up, 8 in 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:31.722 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:31.723 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:03:31.723 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:03:31.723 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:31 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:03:32.476 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:32 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:03:32.476 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:32 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:03:32.476 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:32 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1778395861' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:03:32.476 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:32 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3793333776' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1778395861' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3793333776' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1778395861' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3793333776' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]: dispatch 2026-03-09T00:03:32.723 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:32 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:32.749 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:32 vm10 ceph-mon[48982]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]: dispatch 2026-03-09T00:03:32.749 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:32 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:33.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:33] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.51.0" 2026-03-09T00:03:33.740 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:03:33.740 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]': finished 2026-03-09T00:03:33.740 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: osdmap e85: 8 total, 8 up, 8 in 2026-03-09T00:03:33.740 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4100513787' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3385791586"}]: dispatch 2026-03-09T00:03:33.740 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.740 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]': finished 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: osdmap e85: 8 total, 8 up, 8 in 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/4100513787' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3385791586"}]: dispatch 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:33.741 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:33 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 85 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3601859199"}]': finished 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: osdmap e85: 8 total, 8 up, 8 in 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/4100513787' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3385791586"}]: dispatch 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:03:33.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:33 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:03:34.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:34.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:35.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:34 vm10 ceph-mon[48982]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:03:35.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:34 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4100513787' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3385791586"}]': finished 2026-03-09T00:03:35.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:34 vm10 ceph-mon[48982]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T00:03:35.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:34 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3245038607' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2938686256"}]: dispatch 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[51053]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4100513787' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3385791586"}]': finished 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[51053]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3245038607' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2938686256"}]: dispatch 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[46823]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/4100513787' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3385791586"}]': finished 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[46823]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T00:03:35.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:34 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3245038607' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2938686256"}]: dispatch 2026-03-09T00:03:36.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:35 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3245038607' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2938686256"}]': finished 2026-03-09T00:03:36.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:35 vm10 ceph-mon[48982]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T00:03:36.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3245038607' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2938686256"}]': finished 2026-03-09T00:03:36.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:35 vm04 ceph-mon[51053]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T00:03:36.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:35 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3245038607' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2938686256"}]': finished 2026-03-09T00:03:36.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:35 vm04 ceph-mon[46823]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T00:03:37.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:36 vm10 ceph-mon[48982]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:03:37.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:36.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC 
Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:36 vm04 ceph-mon[51053]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:03:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:36 vm04 ceph-mon[46823]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:03:38.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:38 vm10 ceph-mon[48982]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T00:03:38.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:38 vm04 ceph-mon[51053]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T00:03:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:38 vm04 ceph-mon[46823]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 86 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T00:03:41.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:40 vm10 ceph-mon[48982]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T00:03:41.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:40 vm04 ceph-mon[51053]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T00:03:41.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:40 vm04 ceph-mon[46823]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T00:03:42.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:41 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:42.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:41 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:42.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:41 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:43.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:42 vm10 ceph-mon[48982]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:03:43.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:43] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.51.0" 2026-03-09T00:03:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:42 vm04 ceph-mon[46823]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:03:43.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:42 vm04 ceph-mon[51053]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:03:44.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:44 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:44.146Z 
caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:45.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:44 vm10 ceph-mon[48982]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T00:03:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:44 vm04 ceph-mon[51053]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T00:03:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:44 vm04 ceph-mon[46823]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T00:03:47.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:46 vm10 ceph-mon[48982]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 936 B/s rd, 0 op/s 2026-03-09T00:03:47.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting 
low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:47.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:46 vm04 ceph-mon[51053]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 936 B/s rd, 0 op/s 2026-03-09T00:03:47.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:46 vm04 ceph-mon[46823]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 936 B/s rd, 0 op/s 2026-03-09T00:03:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:48 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:48.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:48 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:48.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:47 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:03:49.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:49 vm10 ceph-mon[48982]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:49 vm04 ceph-mon[51053]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:49 vm04 ceph-mon[46823]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:51.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:51 vm10 ceph-mon[48982]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:51.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:51 vm04 ceph-mon[51053]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:51.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:51 vm04 ceph-mon[46823]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:52 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:52.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:52 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-09T00:03:52.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:52 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:03:53.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:53 vm10 ceph-mon[48982]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:53.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:03:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:03:53] "GET /metrics HTTP/1.1" 200 37528 "" "Prometheus/2.51.0" 2026-03-09T00:03:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:53 vm04 ceph-mon[51053]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:53.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:53 vm04 ceph-mon[46823]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:54.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:55.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:55 vm10 ceph-mon[48982]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:55 vm04 ceph-mon[51053]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:55.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:55 vm04 ceph-mon[46823]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:03:57.268 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:03:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:03:56.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:03:57.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:57 vm10 ceph-mon[48982]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 
853 B/s rd, 0 op/s 2026-03-09T00:03:57.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:57 vm04 ceph-mon[51053]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:57.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:57 vm04 ceph-mon[46823]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:59.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:03:58 vm10 ceph-mon[48982]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:59.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:03:58 vm04 ceph-mon[51053]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:03:59.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:03:58 vm04 ceph-mon[46823]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:01.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:00 vm10 ceph-mon[48982]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:01.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:00 vm04 ceph-mon[51053]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:01.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:00 vm04 ceph-mon[46823]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:02.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:01 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:02.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:01 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:02.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:01 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:03.229 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:02 vm10 ceph-mon[48982]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:03.229 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:03.229 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:04:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:04:03] "GET /metrics HTTP/1.1" 200 37526 "" "Prometheus/2.51.0" 2026-03-09T00:04:03.252 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:02 vm04 ceph-mon[51053]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:03.255 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:03.255 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:02 vm04 ceph-mon[46823]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:03.255 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:04.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:04 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:04.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:05.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:04 vm10 ceph-mon[48982]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:05.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:04 vm04 ceph-mon[51053]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:04 vm04 ceph-mon[46823]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:07 vm10 ceph-mon[48982]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:07.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:06 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 
component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:07 vm04 ceph-mon[51053]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:07.354 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:07 vm04 ceph-mon[46823]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:08 vm10 ceph-mon[48982]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:09.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:08 vm04 ceph-mon[51053]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:09.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:08 vm04 ceph-mon[46823]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:10 vm10 ceph-mon[48982]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:10 vm04 ceph-mon[51053]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:10 vm04 ceph-mon[46823]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:12.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:11 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:11 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:11 vm04 
ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:13.099 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:12 vm10 ceph-mon[48982]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:12 vm04 ceph-mon[51053]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:12 vm04 ceph-mon[46823]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:13.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:04:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:04:13] "GET /metrics HTTP/1.1" 200 37526 "" "Prometheus/2.51.0" 2026-03-09T00:04:14.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:15.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:14 vm10 ceph-mon[48982]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:14 vm04 ceph-mon[51053]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:14 vm04 ceph-mon[46823]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:17.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:16 vm10 ceph-mon[48982]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:17.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:16.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be 
unique on one side" 2026-03-09T00:04:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:16 vm04 ceph-mon[51053]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:16 vm04 ceph-mon[46823]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:17 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:17 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:17 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:19.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:18 vm10 ceph-mon[48982]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:18 vm04 ceph-mon[51053]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:18 vm04 ceph-mon[46823]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:21.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:21 vm10 ceph-mon[48982]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:21 vm04 ceph-mon[51053]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:21 vm04 ceph-mon[46823]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:22.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:22 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:22.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:22 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:22.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:22 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:23.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:23 vm10 ceph-mon[48982]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:23.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:04:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:04:23] "GET /metrics HTTP/1.1" 200 37523 "" "Prometheus/2.51.0" 2026-03-09T00:04:23.350 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:23 vm04 ceph-mon[51053]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:23.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:23 vm04 ceph-mon[46823]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:24.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:25.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:25 vm10 ceph-mon[48982]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:25 vm04 ceph-mon[51053]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:25.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:25 vm04 ceph-mon[46823]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:27.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:27 vm10 ceph-mon[48982]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:27.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:26.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule 
manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:27.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:27 vm04 ceph-mon[51053]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:27 vm04 ceph-mon[46823]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:29.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:28 vm10 ceph-mon[48982]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:29.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:28 vm04 ceph-mon[51053]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:29.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:28 vm04 ceph-mon[46823]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:31.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:30 vm10 ceph-mon[48982]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:31.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:30 vm04 ceph-mon[51053]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:31.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:30 vm04 ceph-mon[46823]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:32.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:31 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:32.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:31 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:32.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:31 vm04 ceph-mon[46823]: 
from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:33.001 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:32 vm04 ceph-mon[51053]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:33.001 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:32 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:33.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:32 vm04 ceph-mon[46823]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:33.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:32 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:33.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:32 vm10 ceph-mon[48982]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:33.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:32 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:33.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:04:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:04:33] "GET /metrics HTTP/1.1" 200 37524 "" "Prometheus/2.51.0" 2026-03-09T00:04:34.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:33 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:04:34.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:33 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:04:34.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:33 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:04:34.409 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[46823]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[51053]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:34.938 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:34 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:35.031 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:34 vm10 ceph-mon[48982]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:35.031 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:34 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:35.031 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:34 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:35.031 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:34 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:35.031 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:34 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.114 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.118 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.118 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:04:36.119 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:04:36.119 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:36 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:04:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:36 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:04:37.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule 
failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:37.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:37 vm10 ceph-mon[48982]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:37 vm04 ceph-mon[51053]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:37.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:37 vm04 ceph-mon[46823]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:39.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:38 vm10 ceph-mon[48982]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:39.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:38 vm04 ceph-mon[51053]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:39.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:38 vm04 ceph-mon[46823]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:41.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:40 vm10 ceph-mon[48982]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:41.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:40 vm04 ceph-mon[51053]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:41.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:40 vm04 ceph-mon[46823]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:42.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:41 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:42.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:41 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", 
"format": "json"}]: dispatch 2026-03-09T00:04:42.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:41 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:43.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:42 vm10 ceph-mon[48982]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:43.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:42 vm04 ceph-mon[51053]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:43.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:42 vm04 ceph-mon[46823]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:43.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:04:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:04:43] "GET /metrics HTTP/1.1" 200 37524 "" "Prometheus/2.51.0" 2026-03-09T00:04:44.577 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:44 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:45.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:44 vm10 ceph-mon[48982]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:45.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:44 vm04 ceph-mon[51053]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:45.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:44 vm04 ceph-mon[46823]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:47.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:46 vm10 ceph-mon[48982]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:47.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be 
unique on one side" 2026-03-09T00:04:47.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:46 vm04 ceph-mon[51053]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:47.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:46 vm04 ceph-mon[46823]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:48.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:47 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:48.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:47 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:48.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:47 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:04:49.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:48 vm10 ceph-mon[48982]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:48 vm04 ceph-mon[51053]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:49.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:48 vm04 ceph-mon[46823]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:51.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:50 vm10 ceph-mon[48982]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:51.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:50 vm04 ceph-mon[51053]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:51.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:50 vm04 ceph-mon[46823]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:52.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:51 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:52.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:51 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:52.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:51 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:04:53.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:52 vm10 ceph-mon[48982]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:53.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:04:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:04:53] "GET /metrics HTTP/1.1" 200 37520 "" "Prometheus/2.51.0" 2026-03-09T00:04:53.350 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:52 vm04 ceph-mon[51053]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:53.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:52 vm04 ceph-mon[46823]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:54.577 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:55.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:54 vm10 ceph-mon[48982]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:55.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:54 vm04 ceph-mon[51053]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:55.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:54 vm04 ceph-mon[46823]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:04:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:56 vm10 ceph-mon[48982]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:57.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:04:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:04:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule 
manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:04:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:56 vm04 ceph-mon[51053]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:56 vm04 ceph-mon[46823]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:59.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:04:58 vm10 ceph-mon[48982]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:59.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:04:58 vm04 ceph-mon[51053]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:04:59.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:04:58 vm04 ceph-mon[46823]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:01.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:00 vm10 ceph-mon[48982]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:01.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:00 vm04 ceph-mon[51053]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:01.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:00 vm04 ceph-mon[46823]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:02.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:01 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:02.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:01 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:02.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:01 vm04 ceph-mon[46823]: 
from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:03.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:02 vm10 ceph-mon[48982]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:03.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:03.327 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:05:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:05:03] "GET /metrics HTTP/1.1" 200 37522 "" "Prometheus/2.51.0" 2026-03-09T00:05:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:02 vm04 ceph-mon[51053]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:02 vm04 ceph-mon[46823]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:04.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:04 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:05.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:04 vm10 ceph-mon[48982]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:05.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:04 vm04 ceph-mon[51053]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:04 vm04 ceph-mon[46823]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:06 vm10 ceph-mon[48982]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:07.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:06 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be 
unique on one side" 2026-03-09T00:05:07.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:06 vm04 ceph-mon[51053]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:07.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:06 vm04 ceph-mon[46823]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:09.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:08 vm10 ceph-mon[48982]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:09.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:08 vm04 ceph-mon[51053]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:09.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:08 vm04 ceph-mon[46823]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:11.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:10 vm10 ceph-mon[48982]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:11.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:10 vm04 ceph-mon[51053]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:11.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:10 vm04 ceph-mon[46823]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:12.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:11 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:11 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:11 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:13.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:12 vm10 ceph-mon[48982]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:13.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:12 vm04 ceph-mon[51053]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:13.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:12 vm04 ceph-mon[46823]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:13.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:05:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:05:13] "GET /metrics HTTP/1.1" 200 37522 "" "Prometheus/2.51.0" 2026-03-09T00:05:14.577 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" 
file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:15.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:14 vm10 ceph-mon[48982]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:14 vm04 ceph-mon[51053]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:14 vm04 ceph-mon[46823]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:17.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:16 vm10 ceph-mon[48982]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:17.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right 
hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:16 vm04 ceph-mon[51053]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:17.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:16 vm04 ceph-mon[46823]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:18.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:17 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:18.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:17 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:18.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:17 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:19.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:18 vm10 ceph-mon[48982]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:18 vm04 ceph-mon[51053]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:18 vm04 ceph-mon[46823]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:21.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:20 vm10 ceph-mon[48982]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:20 vm04 ceph-mon[51053]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:21.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:20 vm04 ceph-mon[46823]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:23.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:22 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:23.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:22 vm10 ceph-mon[48982]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:23.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:05:23 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:05:23] "GET /metrics HTTP/1.1" 200 37465 "" "Prometheus/2.51.0" 2026-03-09T00:05:23.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:22 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:23.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:22 vm04 ceph-mon[51053]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:23.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:22 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:23.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:22 vm04 ceph-mon[46823]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:24.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:25.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:24 vm10 ceph-mon[48982]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:24 vm04 ceph-mon[51053]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:25.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:24 vm04 ceph-mon[46823]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:27.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:26 vm10 ceph-mon[48982]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:27.327 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:26.950Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be 
unique on one side" 2026-03-09T00:05:27.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:26 vm04 ceph-mon[51053]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:26 vm04 ceph-mon[46823]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:29.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:28 vm10 ceph-mon[48982]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:28 vm04 ceph-mon[51053]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:28 vm04 ceph-mon[46823]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:31.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:30 vm10 ceph-mon[48982]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:30 vm04 ceph-mon[51053]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:31.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:30 vm04 ceph-mon[46823]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:33.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:33 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:33.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:33 vm04 ceph-mon[51053]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:33.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:33 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:33 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:33 vm04 ceph-mon[46823]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:33 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:33.398 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:33 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:33.398 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:33 vm10 ceph-mon[48982]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:33.398 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:33 vm10 
ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:33.398 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:05:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:05:33] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-09T00:05:34.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:35.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:35 vm10 ceph-mon[48982]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:35 vm04 ceph-mon[46823]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:35 vm04 ceph-mon[51053]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:36 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:05:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:36 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:05:36.578 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:36 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:05:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:36 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:05:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:36 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:05:37.301 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:37.577 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:37 vm10 ceph-mon[48982]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:37.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:37 vm04 ceph-mon[51053]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:37.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:37 vm04 ceph-mon[46823]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:39.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:39 vm10 ceph-mon[48982]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:39.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:39 vm04 ceph-mon[51053]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:39.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:39 vm04 ceph-mon[46823]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:41.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:41 vm10 ceph-mon[48982]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:41 vm04 ceph-mon[51053]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:41 vm04 ceph-mon[46823]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:43.473 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:05:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:05:43] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-09T00:05:43.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:43 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:43.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:43 vm10 ceph-mon[48982]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:43.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:43 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:43.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:43 vm04 ceph-mon[51053]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:43.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:43 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:43.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:43 vm04 ceph-mon[46823]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:44.484 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:44 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:44.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:44 vm10 ceph-mon[48982]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:44.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:44 vm04 ceph-mon[51053]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:44 vm04 ceph-mon[46823]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:47.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:46 vm10 ceph-mon[48982]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:47.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days 
based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:47.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:46 vm04 ceph-mon[51053]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:47.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:46 vm04 ceph-mon[46823]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:48.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:47 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:48.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:47 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:48.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:47 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:05:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:48 vm10 ceph-mon[48982]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:49.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:48 vm04 ceph-mon[51053]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:49.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:48 vm04 ceph-mon[46823]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:51.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:50 vm10 ceph-mon[48982]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:51.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:50 vm04 ceph-mon[51053]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:51.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:50 vm04 ceph-mon[46823]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:53.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:52 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:53.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:52 vm10 ceph-mon[48982]: pgmap v95: 161 pgs: 161 
active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:53.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:52 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:52 vm04 ceph-mon[51053]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:52 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:05:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:52 vm04 ceph-mon[46823]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:53.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:05:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:05:53] "GET /metrics HTTP/1.1" 200 37531 "" "Prometheus/2.51.0" 2026-03-09T00:05:54.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:05:55.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:54 vm10 ceph-mon[48982]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:55.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:54 vm04 ceph-mon[51053]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:55.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:54 vm04 ceph-mon[46823]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:56 vm10 ceph-mon[48982]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:57.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:05:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:05:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be 
unique on one side" 2026-03-09T00:05:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:56 vm04 ceph-mon[51053]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:56 vm04 ceph-mon[46823]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:05:59.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:05:58 vm10 ceph-mon[48982]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:05:58 vm04 ceph-mon[51053]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:05:59.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:05:58 vm04 ceph-mon[46823]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:01.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:00 vm10 ceph-mon[48982]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:01.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:00 vm04 ceph-mon[51053]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:01.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:00 vm04 ceph-mon[46823]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:02.260 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-09T00:06:03.101 INFO:teuthology.orchestra.run.vm04.stdout:Scheduled to redeploy mgr.y on host 'vm04' 2026-03-09T00:06:03.120 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:03.120 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[46823]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:03.121 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:03.121 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/3992915959' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T00:06:03.121 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:03.121 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[51053]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:03.121 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:03.121 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:02 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3992915959' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T00:06:03.188 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps --refresh' 2026-03-09T00:06:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:02 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:02 vm10 ceph-mon[48982]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:02 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3992915959' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T00:06:03.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:06:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:06:03] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (2m) 88s ago 9m 22.7M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (2m) 88s ago 9m 45.2M - dad864ee21e9 9fb25843918b 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 88s ago 9m 44.0M - 3.5 e1d6a67b021e c94e791a5738 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283 running (4m) 88s ago 11m 556M - 19.2.3-678-ge911bdeb 654f31e6858e 2d7d59a967f3 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:9283 running (12m) 88s ago 12m 418M - 17.2.0 e1d6a67b021e 428d867911a5 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (12m) 88s ago 12m 54.6M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (11m) 88s ago 11m 46.1M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (11m) 88s ago 11m 39.9M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (2m) 88s ago 9m 9499k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (2m) 88s ago 9m 9365k - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (11m) 88s ago 11m 54.6M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (10m) 88s ago 10m 52.7M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (10m) 88s ago 10m 50.5M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (10m) 88s ago 10m 51.3M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (10m) 88s ago 10m 51.8M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (10m) 88s ago 10m 51.5M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (10m) 88s ago 10m 50.5M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (9m) 88s ago 9m 53.7M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 88s ago 9m 39.6M - 2.51.0 1d3b7f56885b 98db0255a281 2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (9m) 88s ago 9m 95.0M - 17.2.0 e1d6a67b021e a815abb0c790 
2026-03-09T00:06:03.703 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (9m) 88s ago 9m 92.9M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:06:03.751 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[46823]: from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[46823]: Schedule redeploy daemon mgr.y 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[51053]: from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[51053]: Schedule redeploy daemon mgr.y 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:04 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:04 vm10 ceph-mon[48982]: from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch daemon redeploy", "name": "mgr.y", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:06:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:04 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:04 vm10 ceph-mon[48982]: Schedule redeploy daemon mgr.y 2026-03-09T00:06:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:04 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:04 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:06:04 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:04.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:04 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:05.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:05 vm10 ceph-mon[48982]: from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:06:05.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:05 vm10 ceph-mon[48982]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:05.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:05 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:05 vm04 ceph-mon[46823]: from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:06:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:05 vm04 ceph-mon[46823]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:05 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:05 vm04 ceph-mon[51053]: from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "refresh": true, "target": ["mon-mgr", 
""]}]: dispatch 2026-03-09T00:06:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:05 vm04 ceph-mon[51053]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:05 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.682 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 systemd[1]: Stopping Ceph mgr.y for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 podman[81301]: 2026-03-09 00:06:06.678655222 +0000 UTC m=+0.049998711 container died 428d867911a552f2c6797fec081b7a59e0e1f55239e3e1e6ec1f80583f965638 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, vcs-type=git, GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_REPO=https://github.com/ceph/ceph-container.git, ceph=True, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, architecture=x86_64) 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 podman[81301]: 2026-03-09 00:06:06.704204152 +0000 UTC m=+0.075547641 container remove 428d867911a552f2c6797fec081b7a59e0e1f55239e3e1e6ec1f80583f965638 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, version=8, RELEASE=HEAD, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, vcs-type=git, ceph=True, distribution-scope=public, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_REPO=https://github.com/ceph/ceph-container.git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, vendor=Red Hat, Inc., GIT_CLEAN=True, maintainer=Guillaume Abrioux , name=centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=) 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 bash[81301]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service: Main process exited, code=exited, status=143/n/a 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service: Failed with result 'exit-code'. 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 systemd[1]: Stopped Ceph mgr.y for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:06 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service: Consumed 36.712s CPU time. 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:06:06.948 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[51053]: Deploying daemon mgr.y on vm04 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:06.948 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:06 vm04 ceph-mon[46823]: Deploying daemon mgr.y on vm04 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 
ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:06 vm10 ceph-mon[48982]: Deploying daemon mgr.y on vm04 2026-03-09T00:06:07.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:06 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:06.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 
00:06:06 vm04 systemd[1]: Starting Ceph mgr.y for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 podman[81413]: 2026-03-09 00:06:07.043592693 +0000 UTC m=+0.015556065 container create 72a51572b51b0c1e660b60b36ea1df31ca97d4916a742af47c3547608b47b1ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 podman[81413]: 2026-03-09 00:06:07.081784776 +0000 UTC m=+0.053748148 container init 72a51572b51b0c1e660b60b36ea1df31ca97d4916a742af47c3547608b47b1ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 podman[81413]: 2026-03-09 00:06:07.084742449 +0000 UTC m=+0.056705821 container start 72a51572b51b0c1e660b60b36ea1df31ca97d4916a742af47c3547608b47b1ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=squid, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223) 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 bash[81413]: 72a51572b51b0c1e660b60b36ea1df31ca97d4916a742af47c3547608b47b1ef 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 podman[81413]: 2026-03-09 00:06:07.037475699 +0000 UTC m=+0.009439081 image pull 
654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 systemd[1]: Started Ceph mgr.y for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:06:07.204 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:07.202+0000 7f0aeb5f7140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:06:07.494 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:07.247+0000 7f0aeb5f7140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:06:07.767 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:07.721+0000 7f0aeb5f7140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:08.098+0000 7f0aeb5f7140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: from numpy import show_config as show_numpy_config 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:08.204+0000 7f0aeb5f7140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:08.250+0000 7f0aeb5f7140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:08.324+0000 7f0aeb5f7140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:08 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:08 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:08 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:08 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:08 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:08 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:06:09.143 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:08.892+0000 7f0aeb5f7140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.019+0000 7f0aeb5f7140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.058+0000 7f0aeb5f7140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.095+0000 7f0aeb5f7140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.139+0000 7f0aeb5f7140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[46823]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.144 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:09.415 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.182+0000 7f0aeb5f7140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:06:09.415 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.361+0000 7f0aeb5f7140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[51053]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:09.416 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:09 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.578 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:09 vm10 ceph-mon[48982]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:09 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:09 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:09 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:06:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:09 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:06:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:09 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:06:09.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.414+0000 7f0aeb5f7140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:06:09.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.643+0000 7f0aeb5f7140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:06:10.205 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.933+0000 7f0aeb5f7140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:06:10.205 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:09.970+0000 7f0aeb5f7140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:06:10.205 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.011+0000 7f0aeb5f7140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:06:10.205 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.087+0000 7f0aeb5f7140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:06:10.205 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.123+0000 7f0aeb5f7140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:06:10.463 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.203+0000 7f0aeb5f7140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:06:10.463 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.317+0000 7f0aeb5f7140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:06:10.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.462+0000 7f0aeb5f7140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.502+0000 7f0aeb5f7140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:06:10] ENGINE Bus STARTING 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: CherryPy Checker: 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: The Application mounted at '' has an empty config. 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:06:10] ENGINE Serving on http://:::9283 2026-03-09T00:06:10.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:06:10 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:06:10] ENGINE Bus STARTED 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: Standby manager daemon y restarted 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: Standby manager daemon y started 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: Standby manager daemon y restarted 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: Standby manager daemon y started 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: from='mgr.? 
192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:06:11.486 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:11 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:06:11.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: Standby manager daemon y restarted 2026-03-09T00:06:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: Standby manager daemon y started 2026-03-09T00:06:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:06:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:06:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:06:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:11 vm10 ceph-mon[48982]: from='mgr.? 
192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:06:12.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:12 vm10 ceph-mon[48982]: mgrmap e26: x(active, since 3m), standbys: y 2026-03-09T00:06:12.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:12 vm04 ceph-mon[51053]: mgrmap e26: x(active, since 3m), standbys: y 2026-03-09T00:06:12.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:12 vm04 ceph-mon[46823]: mgrmap e26: x(active, since 3m), standbys: y 2026-03-09T00:06:13.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:13 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:13.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:13 vm10 ceph-mon[48982]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:13.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:06:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:06:13] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T00:06:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:13 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:13 vm04 ceph-mon[51053]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:13.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:13 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:13 vm04 ceph-mon[46823]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:14.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:14.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:15.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:15 vm10 ceph-mon[48982]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:15.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:15 vm04 ceph-mon[51053]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:15.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:15 vm04 ceph-mon[46823]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:17.289 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:16.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:17 vm10 ceph-mon[48982]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB 
avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:17 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:17.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:17 vm04 ceph-mon[51053]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:17.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:17 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:17.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:17 vm04 ceph-mon[46823]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:17.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:17 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:19 vm10 ceph-mon[48982]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:19.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:19 vm04 ceph-mon[51053]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:19.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:19 vm04 ceph-mon[46823]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:21 vm10 ceph-mon[48982]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:21.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:21 vm04 ceph-mon[51053]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:21.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:21 vm04 ceph-mon[46823]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:23.383 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:06:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:06:23] "GET /metrics HTTP/1.1" 200 37532 "" "Prometheus/2.51.0" 2026-03-09T00:06:23.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:23 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:23.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:23 vm10 ceph-mon[48982]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:23.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:23 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:23.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:23 vm04 ceph-mon[51053]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:23.851 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:23 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:23 vm04 ceph-mon[46823]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:24.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:24.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:25 vm10 ceph-mon[48982]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:25.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:25 vm04 ceph-mon[51053]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:25.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:25 vm04 ceph-mon[46823]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:26.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:26 vm10 ceph-mon[48982]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:26 vm04 ceph-mon[51053]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:26.851 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:26 vm04 ceph-mon[46823]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:27.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:26.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:29.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:28 vm10 ceph-mon[48982]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:29.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:28 vm04 ceph-mon[51053]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:29.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:28 vm04 ceph-mon[46823]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:31.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:30 vm10 ceph-mon[48982]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:31.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:30 vm04 ceph-mon[46823]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:31.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:30 vm04 ceph-mon[51053]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:33.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:32 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:33.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:32 vm10 ceph-mon[48982]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:33.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:06:32 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:33.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:32 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:33.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:32 vm04 ceph-mon[51053]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:33.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:32 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:32 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:32 vm04 ceph-mon[46823]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:32 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:33.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:06:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:06:33] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T00:06:34.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:35.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:34 vm10 ceph-mon[48982]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:35.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:34 vm04 ceph-mon[51053]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:35.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:34 vm04 ceph-mon[46823]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:37.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:36 vm10 ceph-mon[48982]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:37.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:36.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must 
be unique on one side" 2026-03-09T00:06:37.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:36 vm04 ceph-mon[51053]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:37.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:36 vm04 ceph-mon[46823]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:39.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:38 vm10 ceph-mon[48982]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:38 vm04 ceph-mon[51053]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:38 vm04 ceph-mon[46823]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:41.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:40 vm10 ceph-mon[48982]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:41.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:40 vm04 ceph-mon[51053]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:41.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:40 vm04 ceph-mon[46823]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:43.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:42 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:43.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:42 vm10 ceph-mon[48982]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:42 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:42 vm04 ceph-mon[51053]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:43.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:42 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:43.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:42 vm04 ceph-mon[46823]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:43.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:06:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:06:43] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T00:06:44.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:44 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:44.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" 
file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:45.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:44 vm10 ceph-mon[48982]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:45.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:44 vm04 ceph-mon[51053]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:45.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:44 vm04 ceph-mon[46823]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:47.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:46 vm10 ceph-mon[48982]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:47.078 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the 
right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:47.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:46 vm04 ceph-mon[51053]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:47.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:46 vm04 ceph-mon[46823]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:48.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:47 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:48.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:47 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:48.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:47 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:06:49.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:48 vm10 ceph-mon[48982]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:49.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:48 vm04 ceph-mon[51053]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:49.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:48 vm04 ceph-mon[46823]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:51.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:50 vm04 ceph-mon[51053]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:51.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:50 vm04 ceph-mon[46823]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:51.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:50 vm10 ceph-mon[48982]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:53.099 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:52 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:53.099 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:52 vm10 ceph-mon[48982]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:52 vm04 
ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:52 vm04 ceph-mon[51053]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:52 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:06:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:52 vm04 ceph-mon[46823]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:53.577 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:06:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:06:53] "GET /metrics HTTP/1.1" 200 37533 "" "Prometheus/2.51.0" 2026-03-09T00:06:54.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:06:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:54 vm10 ceph-mon[48982]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:55.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:54 vm04 ceph-mon[51053]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:55.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:54 vm04 ceph-mon[46823]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:06:57.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:56 vm10 ceph-mon[48982]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:57.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:06:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:06:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must 
be unique on one side" 2026-03-09T00:06:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:56 vm04 ceph-mon[51053]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:57.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:56 vm04 ceph-mon[46823]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 87 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:06:59.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:06:58 vm10 ceph-mon[48982]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:06:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:06:58 vm04 ceph-mon[51053]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:06:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:06:58 vm04 ceph-mon[46823]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:01.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:00 vm10 ceph-mon[48982]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:01.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:00 vm04 ceph-mon[51053]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:01.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:00 vm04 ceph-mon[46823]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:03.130 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:02 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:03.130 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:02 vm10 ceph-mon[48982]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:03.130 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:03.130 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:07:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:07:03] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.51.0" 2026-03-09T00:07:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:02 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:02 vm04 ceph-mon[51053]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:02 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": 
"service status", "format": "json"}]: dispatch 2026-03-09T00:07:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:02 vm04 ceph-mon[46823]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 91 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 24 op/s 2026-03-09T00:07:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:04.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:04 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:05.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:04 vm10 ceph-mon[48982]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:07:05.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:04 vm04 ceph-mon[51053]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:07:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:04 vm04 ceph-mon[46823]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:07:07.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:06 vm10 ceph-mon[48982]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:07:07.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:06 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:06 vm04 ceph-mon[51053]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:07:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:06 vm04 ceph-mon[46823]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:07:09.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:08 vm10 ceph-mon[48982]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:07:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:08 vm04 ceph-mon[46823]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:07:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:08 vm04 ceph-mon[51053]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:07:10.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:09 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:07:10.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:09 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:07:10.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:09 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:07:10.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:09 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[46823]: from='mgr.24772 
192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:07:10.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:09 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:07:11.532 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:11 vm04 ceph-mon[46823]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:11.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:11 vm04 ceph-mon[51053]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:11.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:11 vm10 ceph-mon[48982]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:13.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:13 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:13 vm10 ceph-mon[48982]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:13.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:07:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:07:13] "GET /metrics HTTP/1.1" 200 37529 "" "Prometheus/2.51.0" 2026-03-09T00:07:13.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:13 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:13.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:13 vm04 ceph-mon[46823]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:13 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:13.600 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:13 vm04 ceph-mon[51053]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 58 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:14.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:15.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:15 vm10 ceph-mon[48982]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:15.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:15 vm04 ceph-mon[46823]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:15.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:15 vm04 ceph-mon[51053]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 96 op/s 2026-03-09T00:07:17.243 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: 
ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:17 vm10 ceph-mon[48982]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:17 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:17.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:17 vm04 ceph-mon[46823]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:17.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:17 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:17.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:17 vm04 ceph-mon[51053]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:17.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:17 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:19.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:19 vm10 ceph-mon[48982]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:19.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:19 vm04 ceph-mon[46823]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:19.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:19 vm04 ceph-mon[51053]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:21.534 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:21 vm04 ceph-mon[46823]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:21.535 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:21 vm04 ceph-mon[51053]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:21.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:21 vm10 ceph-mon[48982]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 
0 op/s 2026-03-09T00:07:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:23 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:23.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:23 vm10 ceph-mon[48982]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:23.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:07:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:07:23] "GET /metrics HTTP/1.1" 200 37539 "" "Prometheus/2.51.0" 2026-03-09T00:07:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:23 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:23 vm04 ceph-mon[46823]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:23 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:23 vm04 ceph-mon[51053]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:24.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:24.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:25.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:25 vm10 ceph-mon[48982]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:25.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:25 vm04 ceph-mon[46823]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:25.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:25 vm04 ceph-mon[51053]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:27.266 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:27.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:27 vm10 ceph-mon[48982]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 
GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:27.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:27 vm04 ceph-mon[46823]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:27.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:27 vm04 ceph-mon[51053]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:29.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:29 vm10 ceph-mon[48982]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:29.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:29 vm04 ceph-mon[46823]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:29.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:29 vm04 ceph-mon[51053]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:31.542 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:31 vm04 ceph-mon[46823]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:31.542 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:31 vm04 ceph-mon[51053]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:31.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:31 vm10 ceph-mon[48982]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:32.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:32 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:32.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:32 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:32 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:33.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:33 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:33.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:33 vm10 ceph-mon[48982]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:33.479 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:07:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:07:33] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T00:07:33.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:33 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:33.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:33 vm04 ceph-mon[51053]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 
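(Editor's aside, not captured log output.) The recurring CephOSDFlapping "Evaluating rule failed" entries in this log all report the same root cause: Prometheus holds two ceph_osd_metadata series for osd.0 that differ only in the instance label (instance="ceph_cluster" vs instance="192.168.123.110:9283") and in the presence of a cluster label, so the on (ceph_daemon) join in the rule has no unique right-hand series. Below is a minimal PromQL sketch of a deduplicated variant of that expression, assuming it is acceptable to collapse the metadata to one series per daemon; this is a hypothetical rewrite for illustration, not the rule actually shipped in ceph_alerts.yml:

    (
      rate(ceph_osd_up[5m])
        * on (ceph_daemon) group_left (hostname)
          # aggregate away the duplicate instance/cluster labels so the
          # right-hand side is unique per ceph_daemon
          max by (ceph_daemon, hostname) (ceph_osd_metadata)
    ) * 60 > 1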
2026-03-09T00:07:33.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:33 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:33.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:33 vm04 ceph-mon[46823]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:07:34.577 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:35.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:35 vm10 ceph-mon[48982]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:35.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:35 vm04 ceph-mon[51053]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:35 vm04 ceph-mon[46823]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:37.298 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:36 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:37.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:37 vm10 ceph-mon[48982]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:37.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:37 vm04 ceph-mon[51053]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:37.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:37 vm04 ceph-mon[46823]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:39.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:39 vm10 ceph-mon[48982]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:39.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:39 vm04 ceph-mon[51053]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:39.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:39 vm04 ceph-mon[46823]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:41.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:41 vm10 ceph-mon[48982]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:41 vm04 ceph-mon[51053]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:41 vm04 ceph-mon[46823]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:43.577 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:43 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:43.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:43 vm10 ceph-mon[48982]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:43.578 
INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:07:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:07:43] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T00:07:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:43 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:43 vm04 ceph-mon[51053]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:43.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:43 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:43 vm04 ceph-mon[46823]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:44.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:44 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:45 vm04 ceph-mon[51053]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:45.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:45 vm04 ceph-mon[46823]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:45.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:45 vm10 ceph-mon[48982]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:47.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:46 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:47 vm04 ceph-mon[51053]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:47 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:47 vm04 ceph-mon[46823]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:47 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:47.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:47 vm10 ceph-mon[48982]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:47.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:47 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:07:49.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:49 vm04 ceph-mon[51053]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:49.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:49 vm04 ceph-mon[46823]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:49.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:49 vm10 ceph-mon[48982]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:51 vm04 ceph-mon[51053]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:51.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:51 vm04 ceph-mon[46823]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:51.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:51 vm10 ceph-mon[48982]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:53 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:53 vm10 ceph-mon[48982]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:53.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:07:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:07:53] "GET /metrics HTTP/1.1" 200 37535 "" "Prometheus/2.51.0" 2026-03-09T00:07:53.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:53 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:53.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:53 vm04 ceph-mon[51053]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T00:07:53.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:53 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:07:53.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:53 vm04 ceph-mon[46823]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:54.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:54 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:54.146Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:55 vm10 ceph-mon[48982]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:55.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:55 vm04 ceph-mon[51053]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:55.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:55 vm04 ceph-mon[46823]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:57.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:07:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:07:56.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:07:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:57 vm10 ceph-mon[48982]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:57.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:57 vm04 ceph-mon[51053]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:57 vm04 ceph-mon[46823]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:07:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:07:59 vm10 ceph-mon[48982]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:07:59 vm04 ceph-mon[51053]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:07:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:07:59 vm04 ceph-mon[46823]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:01.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:01 vm10 ceph-mon[48982]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:01.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:01 vm04 ceph-mon[51053]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:01.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:01 vm04 ceph-mon[46823]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:02.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
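(Editor's aside, not captured log output.) The CephNodeDiskspaceWarning failures are the same class of problem: node_uname_info for instance="vm04" exists twice, once with and once without the cluster label, so the on (instance) join is many-to-many. A hedged sketch of one way the expression could be made join-safe, again a hypothetical variant rather than the rule in ceph_alerts.yml:

    predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5)
      * on (instance) group_left (nodename)
        # collapse the duplicate node_uname_info series to one per instance
        max by (instance, nodename) (node_uname_info)
    < 0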
2026-03-09T00:08:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:03.388 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:08:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:08:03] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-09T00:08:03.752 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:03 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:03.752 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:03 vm04 ceph-mon[51053]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:03.752 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:03 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:03.752 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:03 vm04 ceph-mon[46823]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:03.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:03 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:03.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:03 vm10 ceph-mon[48982]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:04.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:08:04 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:08:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:08:05.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:05 vm10 ceph-mon[48982]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:05.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:05 vm04 ceph-mon[51053]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:05.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:05 vm04 ceph-mon[46823]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:07.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:08:06 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:08:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:08:07.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:07 vm10 ceph-mon[48982]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:07 vm04 ceph-mon[51053]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:07 vm04 ceph-mon[46823]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:09.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:09 vm10 ceph-mon[48982]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:09.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:09 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:08:09.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:09 vm04 ceph-mon[51053]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:09.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:09 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:08:09.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:09 vm04 ceph-mon[46823]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:09.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:09 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:08:10.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:10 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:08:10.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:10 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:08:10.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:10 vm10 ceph-mon[48982]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:08:10.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:10 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:08:10.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:10 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:08:10.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:10 vm04 ceph-mon[46823]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:08:10.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:10 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:08:10.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:10 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:08:10.854 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:10 vm04 ceph-mon[51053]: from='mgr.24772 ' entity='mgr.x' 2026-03-09T00:08:11.828 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:11 vm10 ceph-mon[48982]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:11.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:11 vm04 ceph-mon[46823]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:11.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:11 vm04 ceph-mon[51053]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:13.432 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:08:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:08:13] "GET /metrics HTTP/1.1" 200 37534 "" "Prometheus/2.51.0" 2026-03-09T00:08:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:13 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:13 vm10 ceph-mon[48982]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:13.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:13 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:13.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:13 vm04 ceph-mon[46823]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:13.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:13 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:13.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:13 vm04 ceph-mon[51053]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:14.578 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:08:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:08:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.104\", device_class=\"hdd\", hostname=\"vm04\", instance=\"192.168.123.110:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.104\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:08:15.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:15 vm10 ceph-mon[48982]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:15.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:15 vm04 ceph-mon[46823]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:15.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:15 vm04 ceph-mon[51053]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:17.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:08:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:08:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm04\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"fdcbddf6-1b49-11f1-80b0-7392062373f9\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm04\", job=\"node\", machine=\"x86_64\", nodename=\"vm04\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T00:08:17.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:17 vm10 ceph-mon[48982]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:17.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:17 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:17 vm04 ceph-mon[46823]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:17 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:17 vm04 ceph-mon[51053]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:17 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:19.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:19 vm10 ceph-mon[48982]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:19.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:19 vm04 ceph-mon[46823]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:19.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:19 vm04 ceph-mon[51053]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:21.827 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:21 vm10 ceph-mon[48982]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:21.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:21 vm04 ceph-mon[51053]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:21 vm04 ceph-mon[46823]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:23.463 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:08:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:08:23] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T00:08:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:23 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:23 vm10 ceph-mon[48982]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:23.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:23 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:23.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:23 vm04 ceph-mon[51053]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T00:08:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:23 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:23 vm04 ceph-mon[46823]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:25 vm10 ceph-mon[48982]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:25.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:25 vm04 ceph-mon[51053]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:25.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:25 vm04 ceph-mon[46823]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:26.955Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:27 vm10 ceph-mon[48982]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:27.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:27 vm04 ceph-mon[51053]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:27 vm04 ceph-mon[46823]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:29.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:28 vm10 ceph-mon[48982]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:29.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:28 vm04 ceph-mon[51053]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:29.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:28 vm04 ceph-mon[46823]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:31.077 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:30 vm10 ceph-mon[48982]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:31.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:30 vm04 ceph-mon[51053]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:31.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:30 vm04 ceph-mon[46823]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:33.078 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:32 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:33.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:32 vm10 ceph-mon[48982]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:33.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:32 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:33.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:32 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:33.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:32 vm04 ceph-mon[51053]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:33.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:32 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:32 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:32 vm04 ceph-mon[46823]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:33.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:32 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:33.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:08:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:08:33] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-09T00:08:35.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:34 vm10 ceph-mon[48982]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:35.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:34 vm04 ceph-mon[51053]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:35.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:34 vm04 ceph-mon[46823]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:37.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:36.955Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:37.101 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:36.955Z 
caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:37.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:36 vm04 ceph-mon[51053]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:37.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:36 vm04 ceph-mon[46823]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:37.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:36 vm10 ceph-mon[48982]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:38 vm04 ceph-mon[51053]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:38 vm04 ceph-mon[46823]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:38 vm10 ceph-mon[48982]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:41.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:40 vm10 ceph-mon[48982]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:40 vm04 ceph-mon[51053]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:41.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:40 vm04 ceph-mon[46823]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:42 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:42 vm10 ceph-mon[48982]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:43.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:08:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:08:43] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0" 2026-03-09T00:08:43.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:42 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:43.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:42 vm04 ceph-mon[51053]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:42 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-09T00:08:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:42 vm04 ceph-mon[46823]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:44 vm10 ceph-mon[48982]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:44 vm04 ceph-mon[51053]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:44 vm04 ceph-mon[46823]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:46 vm10 ceph-mon[48982]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:46.956Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:46.956Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:47.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:46 vm04 ceph-mon[51053]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:47.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:46 vm04 ceph-mon[46823]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:47 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:48.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:47 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:48.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:47 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:08:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:48 vm10 ceph-mon[48982]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:48 vm04 ceph-mon[51053]: pgmap v183: 161 pgs: 161 active+clean; 457 
KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:48 vm04 ceph-mon[46823]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:51.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:50 vm10 ceph-mon[48982]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:51.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:50 vm04 ceph-mon[51053]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:51.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:50 vm04 ceph-mon[46823]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:52 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:52 vm10 ceph-mon[48982]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:53.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:08:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:08:53] "GET /metrics HTTP/1.1" 200 37531 "" "Prometheus/2.51.0" 2026-03-09T00:08:53.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:52 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:53.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:52 vm04 ceph-mon[51053]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:53.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:52 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:08:53.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:52 vm04 ceph-mon[46823]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:54 vm10 ceph-mon[48982]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:55.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:54 vm04 ceph-mon[51053]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:55.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:54 vm04 ceph-mon[46823]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:57.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:56 vm10 ceph-mon[48982]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:56.957Z caller=dispatch.go:352 level=error 
component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:08:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:08:56.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:08:57.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:56 vm04 ceph-mon[51053]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:56 vm04 ceph-mon[46823]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:08:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:08:58 vm10 ceph-mon[48982]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:59.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:08:58 vm04 ceph-mon[51053]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:08:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:08:58 vm04 ceph-mon[46823]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:01.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:01 vm10 ceph-mon[48982]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:01.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:01 vm04 ceph-mon[51053]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:01.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:01 vm04 ceph-mon[46823]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:02.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:02 vm10 ceph-mon[48982]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:02.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:02 vm04 ceph-mon[51053]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:02.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:02 vm04 ceph-mon[46823]: from='mgr.24772 192.168.123.110:0/2599593701' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:03 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:03 vm10 ceph-mon[48982]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T00:09:03.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:09:03] "GET /metrics HTTP/1.1" 200 37537 "" "Prometheus/2.51.0"
2026-03-09T00:09:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:03 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T00:09:03.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:03 vm04 ceph-mon[51053]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T00:09:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:03 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T00:09:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:03 vm04 ceph-mon[46823]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T00:09:04.097 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-09T00:09:04.664 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (5m) 2m ago 12m 22.8M - 0.25.0 c8568f914cd2 bcac0140b0f6
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (5m) 2m ago 12m 47.7M - dad864ee21e9 9fb25843918b
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (5m) 2m ago 12m 49.6M - 3.5 e1d6a67b021e c94e791a5738
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283 running (7m) 2m ago 14m 559M - 19.2.3-678-ge911bdeb 654f31e6858e 2d7d59a967f3
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (2m) 2m ago 15m 56.0M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (15m) 2m ago 15m 58.3M 2048M 17.2.0 e1d6a67b021e a0a441d060f5
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (14m) 2m ago 14m 47.4M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (14m) 2m ago 14m 42.0M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (5m) 2m ago 12m 9579k - 1.7.0 72c9c2088986 38e0af6b2fbf
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (5m) 2m ago 12m 9.95M - 1.7.0 72c9c2088986 d059c0022310
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (14m) 2m ago 14m 55.1M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (13m) 2m ago 13m 53.1M 4096M 17.2.0 e1d6a67b021e f112f05700b8
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (13m) 2m ago 13m 51.0M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (13m) 2m ago 13m 51.8M 4096M 17.2.0 e1d6a67b021e d530f6e786d9
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (13m) 2m ago 13m 52.2M 4096M 17.2.0 e1d6a67b021e ad302e6f363c
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (13m) 2m ago 13m 52.0M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (13m) 2m ago 13m 51.0M 4096M 17.2.0 e1d6a67b021e 168db5828111
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (12m) 2m ago 12m 54.0M 4096M 17.2.0 e1d6a67b021e bc6bbac15079
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (5m) 2m ago 12m 41.9M - 2.51.0 1d3b7f56885b 98db0255a281
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (12m) 2m ago 12m 95.3M - 17.2.0 e1d6a67b021e a815abb0c790
2026-03-09T00:09:04.665 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (12m) 2m ago 12m 93.2M - 17.2.0 e1d6a67b021e f6412acdf6e0
2026-03-09T00:09:04.717 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "mon": {
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": {
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "osd": {
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {},
2026-03-09T00:09:05.289 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": {
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout: "overall": {
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13,
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout: }
2026-03-09T00:09:05.290 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-03-09T00:09:05.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:05 vm04 ceph-mon[46823]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T00:09:05.290 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:05 vm04 ceph-mon[51053]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T00:09:05.350 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail'
2026-03-09T00:09:05.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:05 vm10 ceph-mon[48982]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T00:09:05.942 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK
2026-03-09T00:09:06.045 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s'
2026-03-09T00:09:06.226 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:06 vm04 ceph-mon[51053]: from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:09:06.226 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:06 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2035576509' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T00:09:06.226 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:06 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/734008686' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T00:09:06.226 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:06 vm04 ceph-mon[46823]: from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:09:06.226 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:06 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2035576509' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T00:09:06.227 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:06 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/734008686' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T00:09:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:06 vm10 ceph-mon[48982]: from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:09:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:06 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2035576509' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T00:09:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:06 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/734008686' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: cluster:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: id: fdcbddf6-1b49-11f1-80b0-7392062373f9
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: health: HEALTH_OK
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: services:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: mon: 3 daemons, quorum a,c,b (age 14m)
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: mgr: x(active, since 6m), standbys: y
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: osd: 8 osds: 8 up (since 12m), 8 in (since 13m)
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: rgw: 2 daemons active (2 hosts, 1 zones)
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: data:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: pools: 6 pools, 161 pgs
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: objects: 241 objects, 457 KiB
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: usage: 100 MiB used, 160 GiB / 160 GiB avail
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: pgs: 161 active+clean
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: io:
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr
2026-03-09T00:09:06.683 INFO:teuthology.orchestra.run.vm04.stdout:
2026-03-09T00:09:06.754 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail'
2026-03-09T00:09:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:06.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T00:09:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:06.960Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host"
2026-03-09T00:09:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:07 vm04 ceph-mon[51053]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T00:09:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:07 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1662763036' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:09:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:07 vm04 ceph-mon[46823]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:07 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1662763036' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:09:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:07 vm10 ceph-mon[48982]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:07 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1662763036' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:09:08.175 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:08] ENGINE Bus STOPPING 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:08] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:08] ENGINE Bus STOPPED 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:08 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3032313041' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:08 vm04 ceph-mon[51053]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:08 vm04 ceph-mon[51053]: osdmap e88: 8 total, 8 up, 8 in 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:08 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3032313041' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:08 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:09:08.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:08 vm04 ceph-mon[46823]: osdmap e88: 8 total, 8 up, 8 in 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:08 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3032313041' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:08 vm10 ceph-mon[48982]: from='client.? 
' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:08 vm10 ceph-mon[48982]: osdmap e88: 8 total, 8 up, 8 in 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ignoring --setuser ceph since I am not root 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ignoring --setgroup ceph since I am not root 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:08.251+0000 7fb82bd97140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:09:08.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:08.296+0000 7fb82bd97140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:09:08.601 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:08] ENGINE Bus STARTING 2026-03-09T00:09:09.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:08 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:08.759+0000 7fb82bd97140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:09:09.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:08] ENGINE Serving on http://:::9283 2026-03-09T00:09:09.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:08 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:08] ENGINE Bus STARTED 2026-03-09T00:09:09.379 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:09:09.379 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: mgrmap e27: y(active, starting, since 0.807317s) 2026-03-09T00:09:09.379 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:09:09.379 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: Manager daemon y is now available 
2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:09.380 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='client.? ' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: mgrmap e27: y(active, starting, since 0.807317s) 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:09:09.381 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: Manager daemon y is now available 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:09.381 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:09 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: mgrmap e27: y(active, starting, since 0.807317s) 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: Manager daemon y is now available 
2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:09 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:09.147+0000 7fb82bd97140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T00:09:09.411 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: from numpy import show_config as show_numpy_config 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:09.257+0000 7fb82bd97140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:09:09.411 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:09.301+0000 7fb82bd97140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:09:09.412 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:09.404+0000 7fb82bd97140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:09:10.229 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:09 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:09.997+0000 7fb82bd97140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:09:10.229 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.144+0000 7fb82bd97140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:09:10.229 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.189+0000 7fb82bd97140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:09:10.333 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: mgrmap e28: y(active, since 2s) 2026-03-09T00:09:10.333 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: [09/Mar/2026:00:09:09] ENGINE Bus STARTING 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: [09/Mar/2026:00:09:09] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: [09/Mar/2026:00:09:09] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: [09/Mar/2026:00:09:09] ENGINE Bus STARTED 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: [09/Mar/2026:00:09:09] ENGINE Client ('192.168.123.104', 39822) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:10.334 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: mgrmap e28: y(active, since 2s) 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: [09/Mar/2026:00:09:09] ENGINE Bus STARTING 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: 
[09/Mar/2026:00:09:09] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: [09/Mar/2026:00:09:09] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: [09/Mar/2026:00:09:09] ENGINE Bus STARTED 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: [09/Mar/2026:00:09:09] ENGINE Client ('192.168.123.104', 39822) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:10 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.227+0000 7fb82bd97140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.276+0000 7fb82bd97140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:09:10.572 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.338+0000 7fb82bd97140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: mgrmap e28: y(active, since 2s) 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: [09/Mar/2026:00:09:09] ENGINE Bus STARTING 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: [09/Mar/2026:00:09:09] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: [09/Mar/2026:00:09:09] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: [09/Mar/2026:00:09:09] ENGINE Bus STARTED 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: [09/Mar/2026:00:09:09] ENGINE Client ('192.168.123.104', 39822) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:10.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:10 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:10.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.571+0000 
7fb82bd97140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:09:10.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.634+0000 7fb82bd97140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:09:11.217 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:10.883+0000 7fb82bd97140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:11 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 
2026-03-09T00:09:11.216+0000 7fb82bd97140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:09:11.573 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.257+0000 7fb82bd97140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:09:11.574 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.307+0000 7fb82bd97140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:09:11.574 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.404+0000 7fb82bd97140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:09:11.574 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.451+0000 7fb82bd97140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:09:11.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.571+0000 7fb82bd97140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:09:11.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.713+0000 7fb82bd97140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 
2026-03-09T00:09:11.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:11 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.883+0000 7fb82bd97140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:11.931+0000 7fb82bd97140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:09:11] ENGINE Bus STARTING 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: CherryPy Checker: 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: The Application mounted at '' has an empty config. 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:11 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:09:12] ENGINE Serving on http://:::9283 2026-03-09T00:09:12.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:09:12] ENGINE Bus STARTED 2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:09:12.205+0000 7f0aa86f6640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Creating ceph-iscsi config... 2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 
2026-03-09T00:09:12.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Traceback (most recent call last): 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return _run_code(code, main_globals, None, 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: exec(code, run_globals) 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 
2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Traceback (most recent call last): 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return self.wait_async( 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return future.result(timeout) 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return self.__get_result() 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: raise self._exception 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: out, err, code = await self._run_cephadm( 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: raise OrchestratorError( 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Creating ceph-iscsi config... 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Traceback (most recent call last): 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return _run_code(code, main_globals, None, 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: exec(code, run_globals) 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:12.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: mgrmap e29: y(active, since 4s) 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: Standby manager daemon x started 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.? 
192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:09:12.720 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:12 vm10 ceph-mon[48982]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: mgrmap e29: y(active, since 4s) 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: from='mgr.? 
192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[51053]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: mgrmap e29: y(active, since 4s) 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='client.14937 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:12.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: Standby manager daemon x started 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:09:12.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:12 vm04 ceph-mon[46823]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:12.995 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 systemd[1]: Stopping Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.905Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 
2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.906Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.908Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.908Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[70647]: ts=2026-03-09T00:09:12.908Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 podman[74346]: 2026-03-09 00:09:12.916290427 +0000 UTC m=+0.025701728 container died 98db0255a2819c6fb43a95c5a414635aea14993dd545d7ffbf3b76c344476396 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 podman[74346]: 2026-03-09 00:09:12.935461573 +0000 UTC m=+0.044872864 container remove 98db0255a2819c6fb43a95c5a414635aea14993dd545d7ffbf3b76c344476396 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:09:12.996 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:12 vm10 bash[74346]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a 2026-03-09T00:09:13.328 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service: Deactivated successfully. 
2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 systemd[1]: Stopped Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 systemd[1]: Starting Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 podman[74415]: 2026-03-09 00:09:13.104329371 +0000 UTC m=+0.020596994 container create 96d8c7720cf16d3d7d289c2abef64bb8ace4bea9361a00f20f5621d2f0bbc9c5 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 podman[74415]: 2026-03-09 00:09:13.1326691 +0000 UTC m=+0.048936723 container init 96d8c7720cf16d3d7d289c2abef64bb8ace4bea9361a00f20f5621d2f0bbc9c5 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 podman[74415]: 2026-03-09 00:09:13.135887608 +0000 UTC m=+0.052155231 container start 96d8c7720cf16d3d7d289c2abef64bb8ace4bea9361a00f20f5621d2f0bbc9c5 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 bash[74415]: 96d8c7720cf16d3d7d289c2abef64bb8ace4bea9361a00f20f5621d2f0bbc9c5 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 podman[74415]: 2026-03-09 00:09:13.095679176 +0000 UTC m=+0.011946800 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 systemd[1]: Started Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.169Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.170Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.170Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm10 (none))" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.170Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.170Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.173Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.176Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.178Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.179Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=704.317µs 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.179Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.180Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.180Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.185Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=3 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.194Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=3 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.200Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=3 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.201Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=3 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.201Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=86.411µs wal_replay_duration=21.874711ms wbl_replay_duration=181ns total_replay_duration=22.682171ms 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.202Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.202Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.202Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.211Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=9.474697ms db_storage=711ns remote_storage=1.323µs web_handler=301ns query_engine=451ns scrape=979.081µs scrape_sd=96.029µs notify=9.178µs notify_sd=5.36µs rules=8.115314ms tracing=5.45µs 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.211Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-09T00:09:13.329 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:09:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:09:13.212Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 
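Note: the span above is cephadm redeploying prometheus.a: the old systemd unit is stopped, the podman container is removed, a new container is created and started, and Prometheus replays its WAL and reports ready. A minimal hand check of the redeployed daemon, assuming the unit name and port shown in the log, might look like:

    # unit name as reported by systemd above
    systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service
    # Prometheus readiness endpoint on the port it is listening on (9095)
    curl -s http://vm10.local:9095/-/ready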
2026-03-09T00:09:13.601 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:13] ENGINE Bus STOPPING 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: Creating ceph-iscsi config... 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: exec(code, run_globals) 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 
2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.wait_async( 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: return future.result(timeout) 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.__get_result() 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: raise self._exception 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: raise OrchestratorError( 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: Creating ceph-iscsi config... 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: exec(code, run_globals) 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: Reconfiguring prometheus.a (dependencies changed)... 
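Note: the OrchestratorError above is the cluster-log copy of the same cephadm failure: the reconfig of iscsi.foo.vm04.fbyciv wrote its config files but the systemctl restart of the daemon's unit failed. The error message itself names the follow-up commands; a sketch of that triage on vm04, using only the unit and paths quoted in the traceback, would be:

    systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service
    journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service
    # the files cephadm wrote just before the failed restart
    cat /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg
    cat /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh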
2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: mgrmap e30: y(active, since 5s), standbys: x 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:13 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:13] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:13] ENGINE Bus STOPPED 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:13] ENGINE Bus STARTING 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:13] ENGINE Serving on http://:::9283 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:09:13] ENGINE Bus STARTED 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: Creating ceph-iscsi config... 
2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: exec(code, run_globals) 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:14.101 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.wait_async( 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: return future.result(timeout) 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.__get_result() 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: raise self._exception 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: raise OrchestratorError( 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: exec(code, run_globals) 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: mgrmap e30: y(active, since 5s), standbys: x 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:09:14.102 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: exec(code, run_globals) 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.wait_async( 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: return future.result(timeout) 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.__get_result() 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: raise self._exception 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: raise OrchestratorError( 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: exec(code, run_globals) 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:09:14.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: mgrmap e30: y(active, since 5s), standbys: x 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:14.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:13 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:14.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:14 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:09:14.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:14 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:09:14.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:14 vm10 ceph-mon[48982]: mgrmap e31: y(active, since 6s), standbys: x 2026-03-09T00:09:14.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:14 vm10 ceph-mon[48982]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[51053]: mgrmap e31: y(active, since 6s), standbys: x 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[51053]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[46823]: mgrmap e31: y(active, since 6s), standbys: x 2026-03-09T00:09:15.097 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:14 vm04 ceph-mon[46823]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:09:16.053 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.053 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.053 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:16.053 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[51053]: from='mgr.24895 
192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:09:16.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:15 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: 
from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:09:16.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:15 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:09:17.101 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:16.959Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:17.101 
INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:16.961Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:17.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:09:17.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:09:17.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[46823]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:09:17.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3815951683' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:09:17.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:09:17.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:09:17.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[51053]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:09:17.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:16 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3815951683' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:09:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:16 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:09:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:16 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:09:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:16 vm10 ceph-mon[48982]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:09:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:16 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/3815951683' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:09:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:18 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:18 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:18 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:18 vm10 ceph-mon[48982]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[46823]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:09:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:18 vm04 ceph-mon[51053]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:09:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:20 vm10 ceph-mon[48982]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 
2026-03-09T00:09:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:20 vm04 ceph-mon[46823]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:09:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:20 vm04 ceph-mon[51053]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:09:22.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:22 vm10 ceph-mon[48982]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:09:22.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:22 vm04 ceph-mon[46823]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:09:22.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:22 vm04 ceph-mon[51053]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:09:24.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:23 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:24.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:23 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:24.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:23 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:25.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:24 vm10 ceph-mon[48982]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:09:25.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:09:25] "GET /metrics HTTP/1.1" 200 37546 "" "Prometheus/2.51.0" 2026-03-09T00:09:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:24 vm04 ceph-mon[46823]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:09:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:24 vm04 ceph-mon[51053]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:09:26.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:26 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:26.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:26 vm04 ceph-mon[46823]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:09:26.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:26 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:26.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:26 vm04 ceph-mon[51053]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 
GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:09:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:26 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:26 vm10 ceph-mon[48982]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:09:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:26.961Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:26.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:28 vm10 ceph-mon[48982]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:09:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:28 vm04 ceph-mon[46823]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:09:29.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:28 vm04 ceph-mon[51053]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:09:30.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:30 vm10 ceph-mon[48982]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:30.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:30 vm04 ceph-mon[46823]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:30.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:30 vm04 ceph-mon[51053]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:32 vm10 ceph-mon[48982]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:32 vm04 ceph-mon[46823]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:32 vm04 ceph-mon[51053]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:34.543 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:34 vm10 ceph-mon[48982]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 
MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:34.567 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:34 vm04 ceph-mon[46823]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:34.567 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:34 vm04 ceph-mon[51053]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:35.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:09:35] "GET /metrics HTTP/1.1" 200 37546 "" "Prometheus/2.51.0" 2026-03-09T00:09:36.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:36 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:36.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:36 vm04 ceph-mon[51053]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:36.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:36 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:36.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:36 vm04 ceph-mon[46823]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:36 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:36 vm10 ceph-mon[48982]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:36.961Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:36.962Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:38 vm04 ceph-mon[46823]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:38 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:38 
vm04 ceph-mon[51053]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:38 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:38 vm10 ceph-mon[48982]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:38 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:40.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:40 vm10 ceph-mon[48982]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:40.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:40 vm04 ceph-mon[51053]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:40.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:40 vm04 ceph-mon[46823]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:42.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:42 vm10 ceph-mon[48982]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:42 vm04 ceph-mon[51053]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:42.614 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:42 vm04 ceph-mon[46823]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:44.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:44 vm10 ceph-mon[48982]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:44.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:44 vm04 ceph-mon[51053]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:44.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:44 vm04 ceph-mon[46823]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:45.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:09:45] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T00:09:46.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:46 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:46.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:46 vm04 ceph-mon[51053]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:46.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:46 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:46.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:46 vm04 ceph-mon[46823]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:46 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:46 vm10 ceph-mon[48982]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:46.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:47.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:46.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:49.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:48 vm04 ceph-mon[51053]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:49.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:48 vm04 ceph-mon[46823]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:48 vm10 ceph-mon[48982]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:50 vm10 ceph-mon[48982]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:50.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:50 vm04 ceph-mon[51053]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:50.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:50 vm04 ceph-mon[46823]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:52 vm10 ceph-mon[48982]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:52.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:52 vm04 ceph-mon[51053]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:52.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:52 vm04 ceph-mon[46823]: pgmap v25: 161 pgs: 161 active+clean; 457 
KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:53 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:54.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:53 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:53 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:09:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:54 vm10 ceph-mon[48982]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:55.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:09:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:09:55] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 2026-03-09T00:09:55.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:54 vm04 ceph-mon[51053]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:55.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:54 vm04 ceph-mon[46823]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:56.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:56 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:56.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:56 vm04 ceph-mon[51053]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:56.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:56 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:56.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:56 vm04 ceph-mon[46823]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:56 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:09:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:56 vm10 ceph-mon[48982]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:09:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:56.962Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:09:56 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:09:56.963Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:09:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:09:58 vm10 ceph-mon[48982]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:09:58 vm04 ceph-mon[51053]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:09:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:09:58 vm04 ceph-mon[46823]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:00.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:00 vm10 ceph-mon[48982]: overall HEALTH_OK 2026-03-09T00:10:00.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:00 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-09T00:10:00.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:00 vm04 ceph-mon[46823]: overall HEALTH_OK 2026-03-09T00:10:01.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:01 vm10 ceph-mon[48982]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:01.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:01 vm04 ceph-mon[51053]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:01.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:01 vm04 ceph-mon[46823]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:02 vm10 ceph-mon[48982]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:02.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:02 vm04 ceph-mon[51053]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:02.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:02 vm04 ceph-mon[46823]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:04 vm10 ceph-mon[48982]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:04.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:04 vm04 ceph-mon[51053]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:04.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:04 vm04 ceph-mon[46823]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:05.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:10:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:10:05] "GET /metrics HTTP/1.1" 200 37536 "" "Prometheus/2.51.0" 
2026-03-09T00:10:06.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:06 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:06.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:06 vm04 ceph-mon[51053]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:06.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:06 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:06.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:06 vm04 ceph-mon[46823]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:06 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:06 vm10 ceph-mon[48982]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:06.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:06.964Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:08 vm10 ceph-mon[48982]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:08 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:08 vm04 ceph-mon[51053]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:08 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:08 vm04 ceph-mon[46823]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:08 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' 
entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:10.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:10 vm10 ceph-mon[48982]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:10.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:10 vm04 ceph-mon[51053]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:10.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:10 vm04 ceph-mon[46823]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:12.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:12 vm10 ceph-mon[48982]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:12.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:12 vm04 ceph-mon[51053]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:12.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:12 vm04 ceph-mon[46823]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:14 vm10 ceph-mon[48982]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:14.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:14 vm04 ceph-mon[51053]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:14.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:14 vm04 ceph-mon[46823]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:15.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:10:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:10:15] "GET /metrics HTTP/1.1" 200 37544 "" "Prometheus/2.51.0" 2026-03-09T00:10:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:16 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:16 vm10 ceph-mon[48982]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:16.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:16 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:16.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:16 vm04 ceph-mon[51053]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:16.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:16 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:16.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:16 vm04 ceph-mon[46823]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-09T00:10:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:16.963Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:16.964Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:17.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:17 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:10:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:17 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:10:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:17 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:10:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:10:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:10:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:18 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:10:18.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:18 vm10 ceph-mon[48982]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[51053]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 
00:10:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:10:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:18 vm04 ceph-mon[46823]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:20 vm10 ceph-mon[48982]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:20 vm04 ceph-mon[51053]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:20 vm04 ceph-mon[46823]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:22.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:22 vm10 ceph-mon[48982]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:22.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:22 vm04 ceph-mon[51053]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:22.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:22 vm04 ceph-mon[46823]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:24.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:23 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:24.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:23 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:24.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:23 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:25.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:24 vm10 ceph-mon[48982]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:25.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:10:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:10:25] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-09T00:10:25.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:24 vm04 ceph-mon[46823]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:24 vm04 ceph-mon[51053]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:26 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: 
dispatch 2026-03-09T00:10:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:26 vm10 ceph-mon[48982]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:26.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:26 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:26.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:26 vm04 ceph-mon[46823]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:26.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:26 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:26.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:26 vm04 ceph-mon[51053]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:26.964Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:26.965Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:28 vm10 ceph-mon[48982]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:29.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:28 vm04 ceph-mon[46823]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:29.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:28 vm04 ceph-mon[51053]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:30.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:30 vm10 ceph-mon[48982]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:30.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:30 vm04 ceph-mon[46823]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:30.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:30 vm04 ceph-mon[51053]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:32 vm10 ceph-mon[48982]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:32.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:32 vm04 ceph-mon[51053]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:32 vm04 ceph-mon[46823]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:34.561 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:34 vm04 ceph-mon[46823]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:34.561 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:34 vm04 ceph-mon[51053]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:34 vm10 ceph-mon[48982]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:35.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:10:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:10:35] "GET /metrics HTTP/1.1" 200 37543 "" "Prometheus/2.51.0" 2026-03-09T00:10:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:36 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:36 vm10 ceph-mon[48982]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:36.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:36 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:36.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:36 vm04 ceph-mon[51053]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:36.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:36 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:36 vm04 ceph-mon[46823]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:36.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:36.966Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:38 vm10 ceph-mon[48982]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:38 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:38 vm04 ceph-mon[51053]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:38 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:39.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:38 vm04 ceph-mon[46823]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:39.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:38 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:40.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:40 vm10 ceph-mon[48982]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:40 vm04 ceph-mon[51053]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:40 vm04 ceph-mon[46823]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:42.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:42 vm10 ceph-mon[48982]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:42 vm04 ceph-mon[51053]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:42.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:42 vm04 ceph-mon[46823]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:44.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:44 vm10 ceph-mon[48982]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:44.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:44 vm04 ceph-mon[51053]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:44.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:44 vm04 ceph-mon[46823]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:45.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:10:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:10:45] 
"GET /metrics HTTP/1.1" 200 37541 "" "Prometheus/2.51.0" 2026-03-09T00:10:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:46 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:46 vm10 ceph-mon[48982]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:46.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:46 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:46.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:46 vm04 ceph-mon[46823]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:46.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:46 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:46.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:46 vm04 ceph-mon[51053]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:46.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:46.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:48 vm10 ceph-mon[48982]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:49.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:48 vm04 ceph-mon[46823]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:48 vm04 ceph-mon[51053]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:50 vm10 ceph-mon[48982]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:50.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:50 vm04 ceph-mon[46823]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:50.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:50 vm04 ceph-mon[51053]: pgmap v54: 161 pgs: 
161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:53.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:52 vm10 ceph-mon[48982]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:52 vm04 ceph-mon[46823]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:52 vm04 ceph-mon[51053]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:54 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:54.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:54 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:54.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:54 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:10:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:55 vm10 ceph-mon[48982]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:55.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:10:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:10:55] "GET /metrics HTTP/1.1" 200 37540 "" "Prometheus/2.51.0" 2026-03-09T00:10:55.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:55 vm04 ceph-mon[51053]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:55.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:55 vm04 ceph-mon[46823]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:56 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:56 vm10 ceph-mon[48982]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:56 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:56.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:56 vm04 ceph-mon[51053]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:56.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:56 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:10:56.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:56 vm04 ceph-mon[46823]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 
GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:10:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:56.966Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:10:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:10:56.967Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:10:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:10:58 vm10 ceph-mon[48982]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:10:58 vm04 ceph-mon[51053]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:10:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:10:58 vm04 ceph-mon[46823]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:00.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:00 vm10 ceph-mon[48982]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:00.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:00 vm04 ceph-mon[51053]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:00.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:00 vm04 ceph-mon[46823]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:02.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:02 vm10 ceph-mon[48982]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:02.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:02 vm04 ceph-mon[51053]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:02 vm04 ceph-mon[46823]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:04 vm10 ceph-mon[48982]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:04.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:04 vm04 ceph-mon[51053]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:04.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:04 vm04 ceph-mon[46823]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:05.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:11:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:11:05] "GET /metrics HTTP/1.1" 200 37540 "" "Prometheus/2.51.0" 2026-03-09T00:11:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:06 vm10 ceph-mon[48982]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:06 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:06.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:06 vm04 ceph-mon[51053]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:06.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:06 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:06.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:06 vm04 ceph-mon[46823]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:06.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:06 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:06.967Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:06.968Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:09.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:09 vm04 ceph-mon[51053]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:09.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:09 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:09.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:09 vm04 ceph-mon[46823]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:09.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:09 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:09 vm10 
ceph-mon[48982]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:09 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:11.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:11 vm10 ceph-mon[48982]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:11.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:11 vm04 ceph-mon[51053]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:11.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:11 vm04 ceph-mon[46823]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:12.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:12 vm10 ceph-mon[48982]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:12.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:12 vm04 ceph-mon[51053]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:12.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:12 vm04 ceph-mon[46823]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:14 vm10 ceph-mon[48982]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:14.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:14 vm04 ceph-mon[51053]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:14.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:14 vm04 ceph-mon[46823]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:15.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:11:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:11:15] "GET /metrics HTTP/1.1" 200 37540 "" "Prometheus/2.51.0" 2026-03-09T00:11:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:16 vm10 ceph-mon[48982]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:16.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:16 vm04 ceph-mon[51053]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:16.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:16 vm04 ceph-mon[46823]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:16.968Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:16.969Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:17 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:17 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:17 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:11:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:11:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:18 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:11:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:18 vm10 ceph-mon[48982]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:11:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:18 vm10 ceph-mon[48982]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[51053]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[51053]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config dump", "format": 
"json"}]: dispatch 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[46823]: from='mgr.24895 ' entity='mgr.y' 2026-03-09T00:11:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:18 vm04 ceph-mon[46823]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:20 vm10 ceph-mon[48982]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:20 vm04 ceph-mon[51053]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:20 vm04 ceph-mon[46823]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:22.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:22 vm10 ceph-mon[48982]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:22.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:22 vm04 ceph-mon[51053]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:22.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:22 vm04 ceph-mon[46823]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:24.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:23 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:24.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:23 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:24.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:23 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:25.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:24 vm10 ceph-mon[48982]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:25.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:11:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:11:25] "GET /metrics HTTP/1.1" 200 37552 "" "Prometheus/2.51.0" 2026-03-09T00:11:25.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:24 vm04 ceph-mon[51053]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:25.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:24 vm04 ceph-mon[46823]: pgmap v71: 161 pgs: 
161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:26 vm10 ceph-mon[48982]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:26 vm04 ceph-mon[51053]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:26.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:26 vm04 ceph-mon[46823]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:26.969Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:26.970Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:27 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:27 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:27 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:28 vm10 ceph-mon[48982]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:28 vm04 ceph-mon[51053]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:28 vm04 ceph-mon[46823]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:30.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:30 vm10 ceph-mon[48982]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:30.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:30 vm04 ceph-mon[51053]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:30.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:30 vm04 
ceph-mon[46823]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:32 vm10 ceph-mon[48982]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:32.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:32 vm04 ceph-mon[51053]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:32.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:32 vm04 ceph-mon[46823]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:34 vm10 ceph-mon[48982]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:34.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:34 vm04 ceph-mon[51053]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:34.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:34 vm04 ceph-mon[46823]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:35.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:11:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:11:35] "GET /metrics HTTP/1.1" 200 37552 "" "Prometheus/2.51.0" 2026-03-09T00:11:36.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:36 vm10 ceph-mon[48982]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:36 vm04 ceph-mon[51053]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:36 vm04 ceph-mon[46823]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:36.970Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:36.971Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:37.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:37 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:37.351 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:37 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:37 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:38 vm10 ceph-mon[48982]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:38 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:39.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:38 vm04 ceph-mon[51053]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:39.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:38 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:39.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:38 vm04 ceph-mon[46823]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:39.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:38 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:40.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:40 vm10 ceph-mon[48982]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:40.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:40 vm04 ceph-mon[51053]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:40.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:40 vm04 ceph-mon[46823]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:42.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:42 vm10 ceph-mon[48982]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:42 vm04 ceph-mon[51053]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:42.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:42 vm04 ceph-mon[46823]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:44.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:44 vm10 ceph-mon[48982]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:44.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:44 vm04 ceph-mon[51053]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:44.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:44 vm04 ceph-mon[46823]: pgmap v81: 161 pgs: 
161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:45.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:11:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:11:45] "GET /metrics HTTP/1.1" 200 37549 "" "Prometheus/2.51.0" 2026-03-09T00:11:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:46 vm10 ceph-mon[48982]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:46.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:46 vm04 ceph-mon[46823]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:46.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:46 vm04 ceph-mon[51053]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:47.327 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:46.971Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:47.327 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:46.972Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:47.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:47 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:47 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:47.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:47 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:48 vm10 ceph-mon[48982]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:48 vm04 ceph-mon[51053]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:49.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:48 vm04 ceph-mon[46823]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:50 vm10 ceph-mon[48982]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:50.600 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:50 vm04 ceph-mon[51053]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:50.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:50 vm04 ceph-mon[46823]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:52 vm10 ceph-mon[48982]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:52.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:52 vm04 ceph-mon[51053]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:52.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:52 vm04 ceph-mon[46823]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:53 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:54.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:53 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:54.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:53 vm04 ceph-mon[46823]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:11:55.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:54 vm10 ceph-mon[48982]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:55.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:54 vm04 ceph-mon[46823]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:55.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:11:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:11:55] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-09T00:11:55.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:54 vm04 ceph-mon[51053]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:56 vm10 ceph-mon[48982]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:56 vm04 ceph-mon[51053]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:56.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:56 vm04 ceph-mon[46823]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:11:57.222 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:56.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: 
notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:57.222 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:11:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:11:56.972Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:11:57.222 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:57 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:57 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:57 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:11:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:11:58 vm10 ceph-mon[48982]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:11:58 vm04 ceph-mon[51053]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:11:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:11:58 vm04 ceph-mon[46823]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:00.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:00 vm10 ceph-mon[48982]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:00.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:00 vm04 ceph-mon[51053]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:00.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:00 vm04 ceph-mon[46823]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:02 vm10 ceph-mon[48982]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:02.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:02 vm04 ceph-mon[51053]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:02.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:02 vm04 ceph-mon[46823]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:04 vm10 ceph-mon[48982]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:04.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:04 
vm04 ceph-mon[51053]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:04.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:04 vm04 ceph-mon[46823]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:05.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:12:05] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-09T00:12:06.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:06 vm10 ceph-mon[48982]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:06.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:06 vm04 ceph-mon[46823]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:06.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:06 vm04 ceph-mon[51053]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:06.972Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:06.972Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:07 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:07 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:07 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:08.863 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:12:09.078 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:08 vm04 ceph-mon[46823]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:09.078 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:08 vm04 ceph-mon[46823]: from='mgr.24895 
192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:09.078 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:08 vm04 ceph-mon[51053]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:09.078 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:08 vm04 ceph-mon[51053]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:08 vm10 ceph-mon[48982]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:08 vm10 ceph-mon[48982]: from='mgr.24895 192.168.123.104:0/2655600825' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (8m) 2m ago 15m 23.7M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (8m) 2m ago 15m 48.3M - dad864ee21e9 9fb25843918b 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 2m ago 15m 49.1M - 3.5 e1d6a67b021e 41577d9a0335 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283 running (10m) 2m ago 17m 485M - 19.2.3-678-ge911bdeb 654f31e6858e 2d7d59a967f3 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (6m) 2m ago 18m 549M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (18m) 2m ago 18m 65.0M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (17m) 2m ago 17m 51.8M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (17m) 2m ago 17m 50.1M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (8m) 2m ago 15m 9747k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (8m) 2m ago 15m 10.0M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (17m) 2m ago 17m 56.2M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (17m) 2m ago 17m 54.5M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (16m) 2m ago 16m 51.7M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (16m) 2m ago 16m 53.7M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (16m) 2m ago 16m 54.5M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (16m) 2m ago 16m 53.5M 4096M 17.2.0 
e1d6a67b021e 1bef86fdb303 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (16m) 2m ago 16m 51.2M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (16m) 2m ago 16m 55.7M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 2m ago 15m 42.2M - 2.51.0 1d3b7f56885b 96d8c7720cf1 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (15m) 2m ago 15m 96.7M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:12:09.423 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (15m) 2m ago 15m 94.7M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:12:09.476 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T00:12:10.031 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:12:10.031 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:12:10.031 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T00:12:10.031 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:12:10.031 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:12:10.031 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {}, 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:12:10.032 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:12:10.107 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph -s' 2026-03-09T00:12:10.289 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:10 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/4279886333' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:12:10.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:10 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4279886333' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:12:10.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:10 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4279886333' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: cluster: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: id: fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: health: HEALTH_OK 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: services: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: mon: 3 daemons, quorum a,c,b (age 17m) 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: mgr: y(active, since 3m), standbys: x 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: osd: 8 osds: 8 up (since 16m), 8 in (since 16m) 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: rgw: 2 daemons active (2 hosts, 1 zones) 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: data: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: pools: 6 pools, 161 pgs 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: objects: 241 objects, 457 KiB 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: usage: 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: pgs: 161 active+clean 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: io: 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: client: 853 B/s rd, 0 op/s rd, 0 op/s wr 2026-03-09T00:12:10.646 INFO:teuthology.orchestra.run.vm04.stdout: 2026-03-09T00:12:11.359 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:12:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:11 vm04 ceph-mon[46823]: from='client.15063 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:12:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:11 vm04 ceph-mon[46823]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:11.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:11 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/3625107190' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:12:11.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:11 vm04 ceph-mon[51053]: from='client.15063 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:12:11.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:11 vm04 ceph-mon[51053]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:11.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:11 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3625107190' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:12:11.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:11 vm10 ceph-mon[48982]: from='client.15063 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:12:11.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:11 vm10 ceph-mon[48982]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:11.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:11 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3625107190' entity='client.admin' cmd=[{"prefix": "status"}]: dispatch 2026-03-09T00:12:11.889 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:12:11.955 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 1'"'"'' 2026-03-09T00:12:12.496 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:12:12.497 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:12 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/598033808' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:12:12.497 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:12 vm04 ceph-mon[51053]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:12.497 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:12 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/598033808' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:12:12.497 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:12 vm04 ceph-mon[46823]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:12.534 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph mgr fail' 2026-03-09T00:12:12.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:12 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/598033808' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:12:12.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:12 vm10 ceph-mon[48982]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:13 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2526170458' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:12:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:13 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3945895484' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:12:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:13 vm10 ceph-mon[48982]: osdmap e89: 8 total, 8 up, 8 in 2026-03-09T00:12:13.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:13 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2526170458' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:12:13.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:13 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3945895484' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:12:13.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:13 vm04 ceph-mon[51053]: osdmap e89: 8 total, 8 up, 8 in 2026-03-09T00:12:13.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:13 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2526170458' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:12:13.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:13 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3945895484' entity='client.admin' cmd=[{"prefix": "mgr fail"}]: dispatch 2026-03-09T00:12:13.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:13 vm04 ceph-mon[46823]: osdmap e89: 8 total, 8 up, 8 in 2026-03-09T00:12:14.142 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 180' 2026-03-09T00:12:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:14] ENGINE Bus STOPPING 2026-03-09T00:12:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:14] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:12:14.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:14] ENGINE Bus STOPPED 2026-03-09T00:12:14.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:14.048+0000 7f0ae6d88640 -1 mgr handle_mgr_map I was active but no longer am 2026-03-09T00:12:14.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ignoring --setuser ceph since I am not root 2026-03-09T00:12:14.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ignoring --setgroup ceph since I am not root 2026-03-09T00:12:14.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 
09 00:12:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:14.217+0000 7f89e37e9140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:12:14.349 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:14.264+0000 7f89e37e9140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:12:14.629 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:14] ENGINE Bus STARTING 2026-03-09T00:12:14.629 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:14] ENGINE Serving on http://:::9283 2026-03-09T00:12:14.629 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:14 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:14] ENGINE Bus STARTED 2026-03-09T00:12:15.040 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:12:15.040 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:14.769+0000 7f89e37e9140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/3945895484' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: mgrmap e32: x(active, starting, since 1.0083s) 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: Manager daemon x is now available 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:15.167+0000 7f89e37e9140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:12:15.294 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: from numpy import show_config as show_numpy_config 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:15.254+0000 7f89e37e9140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:15.291+0000 7f89e37e9140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3945895484' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: mgrmap e32: x(active, starting, since 1.0083s) 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: Manager daemon x is now available 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' 
cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:15.295 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:15 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3945895484' entity='client.admin' cmd='[{"prefix": "mgr fail"}]': finished 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: mgrmap e32: x(active, starting, since 1.0083s) 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 
6}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: Manager daemon x is now available 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:15 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T00:12:15.601 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:15.369+0000 7f89e37e9140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:12:16.068 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:15.915+0000 7f89e37e9140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:12:16.068 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.062+0000 7f89e37e9140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[46823]: mgrmap e33: x(active, since 2s) 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[46823]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:16 vm04 
ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[51053]: mgrmap e33: x(active, since 2s) 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[51053]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:16 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:16.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.110+0000 7f89e37e9140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:12:16.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.155+0000 7f89e37e9140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:12:16.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.201+0000 7f89e37e9140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:12:16.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.239+0000 7f89e37e9140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:12:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:16 vm10 ceph-mon[48982]: mgrmap e33: x(active, since 2s) 2026-03-09T00:12:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:16 vm10 ceph-mon[48982]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:16 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:16 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:16.703 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.414+0000 7f89e37e9140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:12:16.703 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.466+0000 7f89e37e9140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:12:16.973 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:16.702+0000 7f89e37e9140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.002+0000 7f89e37e9140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.038+0000 7f89e37e9140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 
2026-03-09T00:12:17.241 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.079+0000 7f89e37e9140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.185+0000 7f89e37e9140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.239+0000 7f89e37e9140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: [09/Mar/2026:00:12:15] ENGINE Bus STARTING 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: [09/Mar/2026:00:12:15] ENGINE Serving on http://192.168.123.110:8765 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: [09/Mar/2026:00:12:15] ENGINE Serving on https://192.168.123.110:7150 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: [09/Mar/2026:00:12:15] ENGINE Bus STARTED 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: [09/Mar/2026:00:12:15] ENGINE Client ('192.168.123.110', 53434) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:17.241 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:16.973Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:17.241 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:16.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: [09/Mar/2026:00:12:15] ENGINE Bus STARTING 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: [09/Mar/2026:00:12:15] ENGINE Serving on http://192.168.123.110:8765 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: [09/Mar/2026:00:12:15] ENGINE 
Serving on https://192.168.123.110:7150 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: [09/Mar/2026:00:12:15] ENGINE Bus STARTED 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: [09/Mar/2026:00:12:15] ENGINE Client ('192.168.123.110', 53434) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:17.241 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:17 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: [09/Mar/2026:00:12:15] ENGINE Bus STARTING 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: [09/Mar/2026:00:12:15] ENGINE Serving on http://192.168.123.110:8765 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: [09/Mar/2026:00:12:15] ENGINE Serving on https://192.168.123.110:7150 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: [09/Mar/2026:00:12:15] ENGINE Bus STARTED 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: [09/Mar/2026:00:12:15] ENGINE Client ('192.168.123.110', 53434) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:17 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:17.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.330+0000 7f89e37e9140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:12:17.601 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.464+0000 7f89e37e9140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.619+0000 7f89e37e9140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:17.660+0000 7f89e37e9140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:12:17] ENGINE Bus STARTING 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: CherryPy Checker: 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: The Application mounted at '' has an empty config. 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:12:17] ENGINE Serving on http://:::9283 2026-03-09T00:12:18.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:12:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:12:17] ENGINE Bus STARTED 2026-03-09T00:12:18.571 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:18.571 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: mgrmap e34: x(active, since 4s) 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: Standby manager daemon y started 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.? 
192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: mgrmap e34: x(active, since 4s) 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: Standby manager daemon y started 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.? 
192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:18.572 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:18 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='client.24860 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: mgrmap e34: x(active, since 4s) 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: Standby manager daemon y started 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.? 
192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:18 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: mgrmap e35: x(active, since 5s), standbys: y 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 
00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:12:19.772 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: mgrmap e35: x(active, since 5s), standbys: y 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: 
from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:12:19.773 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:19 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: mgrmap e35: x(active, since 5s), standbys: y 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 
2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:12:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:19 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:21 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:12:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:21 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:12:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:21 vm10 ceph-mon[48982]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:21 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:12:21.404 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:21 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:12:21.404 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:21 vm04 ceph-mon[51053]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:21.404 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:21 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:12:21.404 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:21 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:12:21.404 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:21 vm04 ceph-mon[46823]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:23.259 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:23 vm10 ceph-mon[48982]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:12:23.259 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:23 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:23.259 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:23 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:23.259 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:23 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1175638977' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:12:23.259 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:23 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/4130321307' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]: dispatch 2026-03-09T00:12:23.260 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:23 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]: dispatch 2026-03-09T00:12:23.260 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 systemd[1]: Stopping Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.258Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.263Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T00:12:23.541 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.263Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[74427]: ts=2026-03-09T00:12:23.263Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 podman[76562]: 2026-03-09 00:12:23.269285127 +0000 UTC m=+0.027855967 container died 96d8c7720cf16d3d7d289c2abef64bb8ace4bea9361a00f20f5621d2f0bbc9c5 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 podman[76562]: 2026-03-09 00:12:23.286372822 +0000 UTC m=+0.044943653 container remove 96d8c7720cf16d3d7d289c2abef64bb8ace4bea9361a00f20f5621d2f0bbc9c5 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 bash[76562]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service: Deactivated successfully. 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 systemd[1]: Stopped Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 systemd[1]: Starting Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 podman[76630]: 2026-03-09 00:12:23.466234088 +0000 UTC m=+0.021697213 container create 77372237e49c99284c1c8f9fc08b5b4a999d6b9597bec34f37050d93162a9afd (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 podman[76630]: 2026-03-09 00:12:23.492332324 +0000 UTC m=+0.047795449 container init 77372237e49c99284c1c8f9fc08b5b4a999d6b9597bec34f37050d93162a9afd (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 podman[76630]: 2026-03-09 00:12:23.495128426 +0000 UTC m=+0.050591551 container start 77372237e49c99284c1c8f9fc08b5b4a999d6b9597bec34f37050d93162a9afd (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 bash[76630]: 77372237e49c99284c1c8f9fc08b5b4a999d6b9597bec34f37050d93162a9afd 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 podman[76630]: 2026-03-09 00:12:23.456998014 +0000 UTC m=+0.012461150 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 systemd[1]: Started Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.539Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.539Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.539Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm10 (none))" 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.539Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T00:12:23.542 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.539Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[46823]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:12:23.600 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1175638977' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4130321307' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]: dispatch 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]: dispatch 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[51053]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1175638977' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:12:23.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4130321307' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]: dispatch 2026-03-09T00:12:23.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:23 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]: dispatch 2026-03-09T00:12:23.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:23] ENGINE Bus STOPPING 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.542Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.543Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.544Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.544Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.548Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.549Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.002617ms 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.549Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.567Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=4 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.581Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=4 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.587Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=4 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.589Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=4 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.589Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=4 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.589Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=23.223µs wal_replay_duration=39.745894ms wbl_replay_duration=130ns total_replay_duration=40.789638ms 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.591Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.591Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.591Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: 
ts=2026-03-09T00:12:23.614Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=23.274553ms db_storage=731ns remote_storage=842ns web_handler=351ns query_engine=791ns scrape=523.349µs scrape_sd=71.163µs notify=7.214µs notify_sd=5.601µs rules=22.435642ms tracing=6.112µs 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.614Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-09T00:12:23.828 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:12:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:12:23.614Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]': finished 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: osdmap e90: 8 total, 8 up, 8 in 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2121970502' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3765930744"}]: dispatch 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:24 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:24.222 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:24 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:24] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:12:24.223 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:24] ENGINE Bus STOPPED 2026-03-09T00:12:24.223 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:24] ENGINE Bus STARTING 2026-03-09T00:12:24.223 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:24] ENGINE Serving on http://:::9283 2026-03-09T00:12:24.223 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:12:24] ENGINE Bus STARTED 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]': finished 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: osdmap e90: 8 total, 8 up, 8 in 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2121970502' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3765930744"}]: dispatch 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.263 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:24.264 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 
00:12:24 vm04 ceph-mon[46823]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T00:12:24.264 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:12:24.264 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3873514736"}]': finished 2026-03-09T00:12:24.264 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: osdmap e90: 8 total, 8 up, 8 in 2026-03-09T00:12:24.264 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2121970502' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3765930744"}]: dispatch 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:24.265 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:24 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2121970502' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3765930744"}]': finished 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3178823445' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]: dispatch 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:25.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:25.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:25 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2121970502' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3765930744"}]': finished 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/3178823445' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2121970502' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3765930744"}]': finished 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3178823445' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:12:25.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:25 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:12:26.654 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:26 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]': finished 2026-03-09T00:12:26.654 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:26 vm10 ceph-mon[48982]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T00:12:26.654 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:26 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1245930779' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/359514505"}]: dispatch 2026-03-09T00:12:26.654 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:26 vm10 ceph-mon[48982]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[51053]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]': finished 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[51053]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1245930779' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/359514505"}]: dispatch 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[51053]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/359514505"}]': finished 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[46823]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1245930779' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/359514505"}]: dispatch 2026-03-09T00:12:26.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:26 vm04 ceph-mon[46823]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 13 op/s 2026-03-09T00:12:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:26.973Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:27.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:26.974Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:27 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1245930779' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/359514505"}]': finished 2026-03-09T00:12:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:27 vm10 ceph-mon[48982]: osdmap e93: 8 total, 8 up, 8 in 2026-03-09T00:12:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:27 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1748326645' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]: dispatch 2026-03-09T00:12:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:27 vm10 ceph-mon[48982]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]: dispatch 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1245930779' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/359514505"}]': finished 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[51053]: osdmap e93: 8 total, 8 up, 8 in 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1748326645' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]: dispatch 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]: dispatch 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1245930779' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/359514505"}]': finished 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[46823]: osdmap e93: 8 total, 8 up, 8 in 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/1748326645' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]: dispatch 2026-03-09T00:12:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:27 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]: dispatch 2026-03-09T00:12:28.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:28 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]': finished 2026-03-09T00:12:28.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:28 vm10 ceph-mon[48982]: osdmap e94: 8 total, 8 up, 8 in 2026-03-09T00:12:28.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:28 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/609177311' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]: dispatch 2026-03-09T00:12:28.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:28 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]: dispatch 2026-03-09T00:12:28.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:28 vm10 ceph-mon[48982]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[46823]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]': finished 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[46823]: osdmap e94: 8 total, 8 up, 8 in 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/609177311' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]: dispatch 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]: dispatch 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[46823]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1520037998"}]': finished 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[51053]: osdmap e94: 8 total, 8 up, 8 in 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/609177311' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]: dispatch 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[51053]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]: dispatch 2026-03-09T00:12:28.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:28 vm04 ceph-mon[51053]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T00:12:29.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:29 vm10 ceph-mon[48982]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]': finished 2026-03-09T00:12:29.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:29 vm10 ceph-mon[48982]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T00:12:29.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:29 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:29.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:29 vm04 ceph-mon[51053]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]': finished 2026-03-09T00:12:29.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:29 vm04 ceph-mon[51053]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T00:12:29.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:29 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:29.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:29 vm04 ceph-mon[46823]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/1764453145"}]': finished 2026-03-09T00:12:29.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:29 vm04 ceph-mon[46823]: osdmap e95: 8 total, 8 up, 8 in 2026-03-09T00:12:29.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:29 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:30.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:30 vm10 ceph-mon[48982]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:30.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:30 vm04 ceph-mon[46823]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:30.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:30 vm04 ceph-mon[51053]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 101 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:12:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:33 vm10 ceph-mon[48982]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:33 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:33.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:12:33] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-09T00:12:33.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:33 vm04 ceph-mon[51053]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:33.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:33 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:33.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:33 vm04 ceph-mon[46823]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:33.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:33 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:34.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:34 vm04 ceph-mon[51053]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 681 B/s rd, 0 op/s 2026-03-09T00:12:34.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:34 vm04 
ceph-mon[46823]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 681 B/s rd, 0 op/s 2026-03-09T00:12:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:34 vm10 ceph-mon[48982]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 681 B/s rd, 0 op/s 2026-03-09T00:12:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:36.974Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:36.975Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:37.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:37 vm04 ceph-mon[51053]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:37.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:37 vm04 ceph-mon[46823]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:37 vm10 ceph-mon[48982]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:39 vm10 ceph-mon[48982]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:12:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:39 vm04 ceph-mon[51053]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:12:39.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:39 vm04 ceph-mon[46823]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:12:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:41 vm10 ceph-mon[48982]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 890 B/s rd, 0 op/s 2026-03-09T00:12:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:41 vm04 ceph-mon[51053]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 890 B/s rd, 0 op/s 2026-03-09T00:12:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:41 vm04 ceph-mon[46823]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 890 B/s rd, 0 op/s 2026-03-09T00:12:43.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:43 vm10 ceph-mon[48982]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:43.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:43 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:12:43] "GET /metrics HTTP/1.1" 200 37550 "" "Prometheus/2.51.0" 2026-03-09T00:12:43.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:43 vm04 ceph-mon[51053]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:43 vm04 ceph-mon[46823]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:44.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:44 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:44.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:44 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:44.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:44 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:45 vm10 ceph-mon[48982]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:45 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:45 vm04 ceph-mon[51053]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:45.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:45 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:45.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:45 vm04 ceph-mon[46823]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:45.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:45 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:12:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:46.976Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:47.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:46.977Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 
192.168.123.1:53: no such host" 2026-03-09T00:12:47.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:47 vm04 ceph-mon[51053]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:47.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:47 vm04 ceph-mon[46823]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:47.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:47 vm10 ceph-mon[48982]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:49 vm10 ceph-mon[48982]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:49 vm04 ceph-mon[51053]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:49.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:49 vm04 ceph-mon[46823]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:51 vm10 ceph-mon[48982]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:51 vm04 ceph-mon[51053]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:51 vm04 ceph-mon[46823]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:53 vm10 ceph-mon[48982]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:53.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:12:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:12:53] "GET /metrics HTTP/1.1" 200 37547 "" "Prometheus/2.51.0" 2026-03-09T00:12:53.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:53 vm04 ceph-mon[51053]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:53.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:53 vm04 ceph-mon[46823]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:54.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:54 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:54.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:54 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:54.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:54 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:12:55.578 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:55 vm10 ceph-mon[48982]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:55 vm04 ceph-mon[51053]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:55.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:55 vm04 ceph-mon[46823]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:56.977Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:12:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:12:56.978Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:12:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:57 vm04 ceph-mon[51053]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:57 vm04 ceph-mon[46823]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:57 vm10 ceph-mon[48982]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:12:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:12:59 vm10 ceph-mon[48982]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:12:59 vm04 ceph-mon[51053]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:12:59.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:12:59 vm04 ceph-mon[46823]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:00.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:00 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:00.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:00 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:00.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:00 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": 
"json"}]: dispatch 2026-03-09T00:13:01.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:01 vm10 ceph-mon[48982]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:01 vm04 ceph-mon[51053]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:01.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:01 vm04 ceph-mon[46823]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:02.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:02 vm10 ceph-mon[48982]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:02.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:02 vm04 ceph-mon[51053]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:02.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:02 vm04 ceph-mon[46823]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:03.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:13:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:13:03] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T00:13:04.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:04 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:04 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:04.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:04 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:05.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:05 vm10 ceph-mon[48982]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:05 vm04 ceph-mon[51053]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:05 vm04 ceph-mon[46823]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:07 vm10 ceph-mon[48982]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:06.978Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no 
such host" 2026-03-09T00:13:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:06.979Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:07.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:07 vm04 ceph-mon[51053]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:07 vm04 ceph-mon[46823]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:09 vm10 ceph-mon[48982]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:09.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:09 vm04 ceph-mon[51053]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:09.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:09 vm04 ceph-mon[46823]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:11 vm10 ceph-mon[48982]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:11 vm04 ceph-mon[51053]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:11 vm04 ceph-mon[46823]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:13 vm10 ceph-mon[48982]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:13.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:13:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:13:13] "GET /metrics HTTP/1.1" 200 37545 "" "Prometheus/2.51.0" 2026-03-09T00:13:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:13 vm04 ceph-mon[51053]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:13 vm04 ceph-mon[46823]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:14 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:14.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:14 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-09T00:13:14.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:14 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:15.214 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:15 vm10 ceph-mon[48982]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:15.214 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:15 vm04 ceph-mon[46823]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:15 vm04 ceph-mon[51053]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:16.979Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:16.979Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:17 vm04 ceph-mon[46823]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:17 vm04 ceph-mon[51053]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:17 vm10 ceph-mon[48982]: pgmap v40: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:19.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:19 vm10 ceph-mon[48982]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:19 vm04 ceph-mon[51053]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:19 vm04 ceph-mon[46823]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:21 vm10 ceph-mon[48982]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:21 vm04 ceph-mon[51053]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:21 vm04 ceph-mon[46823]: pgmap v42: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:23.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:23 vm10 ceph-mon[48982]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:23.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:13:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:13:23] "GET /metrics HTTP/1.1" 200 37544 "" "Prometheus/2.51.0" 2026-03-09T00:13:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:23 vm04 ceph-mon[51053]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:23.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:23 vm04 ceph-mon[46823]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:24.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:24 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:24.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:24 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:24.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:24 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:25 vm10 ceph-mon[48982]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:25 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:13:25.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:25 vm04 ceph-mon[51053]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:25.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:25 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:13:25.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:25 vm04 ceph-mon[46823]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:25.601 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:25 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:13:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:26 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:13:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:26 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:13:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:26 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:13:26.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:26 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:13:26.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:26 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:13:26.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:26 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:13:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:26 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:13:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:26 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:13:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:26 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:13:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:26.979Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:26.979Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:27 vm04 ceph-mon[46823]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:27 vm04 ceph-mon[51053]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:27 vm10 ceph-mon[48982]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 
GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:29 vm10 ceph-mon[48982]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:29 vm04 ceph-mon[46823]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:29.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:29 vm04 ceph-mon[51053]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:30.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:30 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:30.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:30 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:30.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:30 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:31.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:31 vm10 ceph-mon[48982]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:31 vm04 ceph-mon[46823]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:31.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:31 vm04 ceph-mon[51053]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:33 vm10 ceph-mon[48982]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:33.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:13:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:13:33] "GET /metrics HTTP/1.1" 200 37546 "" "Prometheus/2.51.0" 2026-03-09T00:13:33.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:33 vm04 ceph-mon[46823]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:33.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:33 vm04 ceph-mon[51053]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:34 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:34.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:34 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:34.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:34 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
2026-03-09T00:13:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:35 vm10 ceph-mon[48982]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:35.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:35 vm04 ceph-mon[46823]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:35.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:35 vm04 ceph-mon[51053]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:36.979Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:36.980Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:37.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:37 vm04 ceph-mon[46823]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:37.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:37 vm04 ceph-mon[51053]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:37 vm10 ceph-mon[48982]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:39 vm10 ceph-mon[48982]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:39.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:39 vm04 ceph-mon[46823]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:39 vm04 ceph-mon[51053]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:41 vm10 ceph-mon[48982]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:41.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:41 vm04 ceph-mon[46823]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:41 vm04 ceph-mon[51053]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T00:13:43.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:43 vm10 ceph-mon[48982]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:43.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:13:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:13:43] "GET /metrics HTTP/1.1" 200 37546 "" "Prometheus/2.51.0" 2026-03-09T00:13:43.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:43 vm04 ceph-mon[46823]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:43.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:43 vm04 ceph-mon[51053]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:44.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:44 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:44.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:44 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:44.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:44 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:45 vm10 ceph-mon[48982]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:45 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:45.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:45 vm04 ceph-mon[46823]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:45.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:45 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:45.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:45 vm04 ceph-mon[51053]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:45.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:45 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:13:47.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:46.980Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:47.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: 
ts=2026-03-09T00:13:46.981Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:47.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:47 vm04 ceph-mon[46823]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:47.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:47 vm04 ceph-mon[51053]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:47.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:47 vm10 ceph-mon[48982]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:49 vm10 ceph-mon[48982]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:49 vm04 ceph-mon[46823]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:49 vm04 ceph-mon[51053]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:51 vm10 ceph-mon[48982]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:51 vm04 ceph-mon[46823]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:51 vm04 ceph-mon[51053]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:53 vm10 ceph-mon[48982]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:53.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:13:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:13:53] "GET /metrics HTTP/1.1" 200 37548 "" "Prometheus/2.51.0" 2026-03-09T00:13:53.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:53 vm04 ceph-mon[46823]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:53.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:53 vm04 ceph-mon[51053]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:54.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:54 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:54.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:54 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", 
"format": "json"}]: dispatch 2026-03-09T00:13:54.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:54 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:13:55.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:55 vm10 ceph-mon[48982]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:55.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:55 vm04 ceph-mon[46823]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:55 vm04 ceph-mon[51053]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:56.980Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:13:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:13:56.981Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:13:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:57 vm04 ceph-mon[46823]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:57.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:57 vm04 ceph-mon[51053]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:57 vm10 ceph-mon[48982]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:13:59.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:13:59 vm04 ceph-mon[46823]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:59.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:13:59 vm04 ceph-mon[51053]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:13:59.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:13:59 vm10 ceph-mon[48982]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:00.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:00 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:00.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:00 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' 
cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:00.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:00 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:01.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:01 vm10 ceph-mon[48982]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:01.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:01 vm04 ceph-mon[46823]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:01.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:01 vm04 ceph-mon[51053]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:03.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:03 vm10 ceph-mon[48982]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:03.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:14:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:14:03] "GET /metrics HTTP/1.1" 200 37546 "" "Prometheus/2.51.0" 2026-03-09T00:14:03.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:03 vm04 ceph-mon[46823]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:03.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:03 vm04 ceph-mon[51053]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:04.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:04 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:04.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:04 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:04.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:04 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:05.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:05 vm10 ceph-mon[48982]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:05.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:05 vm04 ceph-mon[46823]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:05.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:05 vm04 ceph-mon[51053]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:06.981Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial 
tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:06.982Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:07 vm04 ceph-mon[46823]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:07 vm04 ceph-mon[51053]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:07 vm10 ceph-mon[48982]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:09 vm10 ceph-mon[48982]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:09.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:09 vm04 ceph-mon[51053]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:09.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:09 vm04 ceph-mon[46823]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:11 vm10 ceph-mon[48982]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:11.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:11 vm04 ceph-mon[51053]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:11.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:11 vm04 ceph-mon[46823]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:13 vm10 ceph-mon[48982]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:13.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:14:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:14:13] "GET /metrics HTTP/1.1" 200 37546 "" "Prometheus/2.51.0" 2026-03-09T00:14:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:13 vm04 ceph-mon[51053]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:13.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:13 vm04 ceph-mon[46823]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:14 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", 
"format": "json"}]: dispatch 2026-03-09T00:14:14.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:14 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:14.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:14 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:15.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:15 vm10 ceph-mon[48982]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:15.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:15.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:15 vm04 ceph-mon[51053]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:15.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:15.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:15 vm04 ceph-mon[46823]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:15.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:16.982Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:16.982Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:17 vm04 ceph-mon[51053]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:17 vm04 ceph-mon[46823]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:17 vm10 ceph-mon[48982]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:19.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:19 vm10 ceph-mon[48982]: pgmap v71: 161 pgs: 
161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:19 vm04 ceph-mon[51053]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:19 vm04 ceph-mon[46823]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:21 vm10 ceph-mon[48982]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:21 vm04 ceph-mon[51053]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:21.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:21 vm04 ceph-mon[46823]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:23.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:23 vm10 ceph-mon[48982]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:23.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:14:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:14:23] "GET /metrics HTTP/1.1" 200 37542 "" "Prometheus/2.51.0" 2026-03-09T00:14:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:23 vm04 ceph-mon[51053]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:23 vm04 ceph-mon[46823]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:24.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:24 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:24.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:24 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:24.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:24 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:25 vm10 ceph-mon[48982]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:25.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:25 vm04 ceph-mon[46823]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:25.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:25 vm04 ceph-mon[51053]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:26 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: 
dispatch 2026-03-09T00:14:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:26 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:14:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:26 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:14:26.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:26 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:14:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:14:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:14:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:14:26.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:14:26.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:14:26.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:14:26.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:14:26.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:26 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:14:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:26.983Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:26.983Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:27 vm04 ceph-mon[51053]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:27 vm04 ceph-mon[46823]: pgmap v75: 161 pgs: 161 active+clean; 
457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:27 vm10 ceph-mon[48982]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:29 vm10 ceph-mon[48982]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:29 vm04 ceph-mon[51053]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:29.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:29 vm04 ceph-mon[46823]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:30.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:30 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:30.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:30 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:30.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:30 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:31.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:31 vm10 ceph-mon[48982]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:31 vm04 ceph-mon[51053]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:31.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:31 vm04 ceph-mon[46823]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:33 vm10 ceph-mon[48982]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:33.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:14:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:14:33] "GET /metrics HTTP/1.1" 200 37556 "" "Prometheus/2.51.0" 2026-03-09T00:14:33.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:33 vm04 ceph-mon[51053]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:33.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:33 vm04 ceph-mon[46823]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:34 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:34.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:34 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-09T00:14:34.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:34 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:35 vm10 ceph-mon[48982]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:35 vm04 ceph-mon[51053]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:35 vm04 ceph-mon[46823]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:36.983Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:36.983Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:37.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:37 vm04 ceph-mon[51053]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:37.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:37 vm04 ceph-mon[46823]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:37 vm10 ceph-mon[48982]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:39 vm10 ceph-mon[48982]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:39 vm04 ceph-mon[51053]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:39.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:39 vm04 ceph-mon[46823]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:41 vm10 ceph-mon[48982]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:41 vm04 ceph-mon[51053]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 
853 B/s rd, 0 op/s 2026-03-09T00:14:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:41 vm04 ceph-mon[46823]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:43.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:43 vm10 ceph-mon[48982]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:43.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:14:43 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:14:43] "GET /metrics HTTP/1.1" 200 37556 "" "Prometheus/2.51.0" 2026-03-09T00:14:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:43 vm04 ceph-mon[51053]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:43.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:43 vm04 ceph-mon[46823]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:44.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:44 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:44.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:44 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:44.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:44 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:45 vm10 ceph-mon[48982]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:45 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:45 vm04 ceph-mon[51053]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:45.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:45 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:45 vm04 ceph-mon[46823]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:45.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:45 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:14:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:46.983Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup 
host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:46.984Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:47.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:47 vm04 ceph-mon[51053]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:47.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:47 vm04 ceph-mon[46823]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:47.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:47 vm10 ceph-mon[48982]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:49 vm10 ceph-mon[48982]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:49 vm04 ceph-mon[51053]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:49 vm04 ceph-mon[46823]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:51 vm10 ceph-mon[48982]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:51 vm04 ceph-mon[51053]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:51.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:51 vm04 ceph-mon[46823]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:53 vm10 ceph-mon[48982]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:53.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:14:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:14:53] "GET /metrics HTTP/1.1" 200 37551 "" "Prometheus/2.51.0" 2026-03-09T00:14:53.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:53 vm04 ceph-mon[51053]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:53.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:53 vm04 ceph-mon[46823]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:54.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:54 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-09T00:14:54.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:54 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:54.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:54 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:14:55.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:55 vm10 ceph-mon[48982]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:55.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:55 vm04 ceph-mon[51053]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:55.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:55 vm04 ceph-mon[46823]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:57.245 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:56.984Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:57.245 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:14:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:14:56.985Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:14:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:57 vm10 ceph-mon[48982]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:57 vm04 ceph-mon[51053]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:57 vm04 ceph-mon[46823]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:14:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:14:59 vm10 ceph-mon[48982]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:14:59 vm04 ceph-mon[51053]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:14:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:14:59 vm04 ceph-mon[46823]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:00.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:00 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": 
"osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:00.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:00 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:00.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:00 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:01.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:01 vm10 ceph-mon[48982]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:01.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:01 vm04 ceph-mon[51053]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:01.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:01 vm04 ceph-mon[46823]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:03.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:03 vm10 ceph-mon[48982]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:03.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:03 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:15:03] "GET /metrics HTTP/1.1" 200 37558 "" "Prometheus/2.51.0" 2026-03-09T00:15:03.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:03 vm04 ceph-mon[51053]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:03.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:03 vm04 ceph-mon[46823]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:04.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:04 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:04.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:04 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:04.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:04 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:05.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:05 vm10 ceph-mon[48982]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:05.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:05 vm04 ceph-mon[51053]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:05.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:05 vm04 ceph-mon[46823]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:06.985Z caller=dispatch.go:352 level=error 
component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:06.985Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:07 vm04 ceph-mon[51053]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:07 vm04 ceph-mon[46823]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:07 vm10 ceph-mon[48982]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:09 vm10 ceph-mon[48982]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:09 vm04 ceph-mon[51053]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:09 vm04 ceph-mon[46823]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:11 vm10 ceph-mon[48982]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:11 vm04 ceph-mon[51053]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:11 vm04 ceph-mon[46823]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:13 vm10 ceph-mon[48982]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:13.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:13 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:15:13] "GET /metrics HTTP/1.1" 200 37558 "" "Prometheus/2.51.0" 2026-03-09T00:15:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:13 vm04 ceph-mon[51053]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:13 vm04 ceph-mon[46823]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 
853 B/s rd, 0 op/s 2026-03-09T00:15:14.526 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:14 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:14.526 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:14 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:14.563 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:15:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:14 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:15.060 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:15:15.060 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (12m) 2m ago 18m 26.0M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:15:15.060 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (11m) 2m ago 18m 48.4M - dad864ee21e9 9fb25843918b 2026-03-09T00:15:15.060 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 2m ago 18m 69.3M - 3.5 e1d6a67b021e 27ffed7ff8e7 2026-03-09T00:15:15.060 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283 running (14m) 2m ago 20m 544M - 19.2.3-678-ge911bdeb 654f31e6858e 2d7d59a967f3 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (9m) 2m ago 21m 485M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (21m) 2m ago 21m 71.8M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (20m) 2m ago 20m 50.4M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (20m) 2m ago 20m 50.4M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (12m) 2m ago 18m 9857k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (11m) 2m ago 18m 10.0M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (20m) 2m ago 20m 56.5M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (20m) 2m ago 20m 54.7M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (20m) 2m ago 20m 51.9M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (19m) 2m ago 19m 53.9M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (19m) 2m ago 19m 54.7M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (19m) 2m 
ago 19m 53.6M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (19m) 2m ago 19m 51.5M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (19m) 2m ago 19m 56.0M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 2m ago 18m 46.4M - 2.51.0 1d3b7f56885b 77372237e49c 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (18m) 2m ago 18m 97.7M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:15:15.061 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (18m) 2m ago 18m 95.6M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:15:15.127 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls' 2026-03-09T00:15:15.312 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:15 vm04 ceph-mon[46823]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:15.312 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:15 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:15.312 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:15 vm04 ceph-mon[51053]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:15.312 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:15 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:15.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:15 vm10 ceph-mon[48982]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:15.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:15 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager ?:9093,9094 1/1 2m ago 18m vm04=a;count:1 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:grafana ?:3000 1/1 2m ago 18m vm10=a;count:1 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo ?:5000 1/1 2m ago 18m count:1 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:mgr 2/2 2m ago 20m vm04=y;vm10=x;count:2 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:mon 3/3 2m ago 20m vm04:192.168.123.104=a;vm04:[v2:192.168.123.104:3301,v1:192.168.123.104:6790]=c;vm10:192.168.123.110=b;count:3 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter ?:9100 2/2 2m ago 19m vm04=a;vm10=b;count:2 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:osd 8 2m ago - 2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:prometheus ?:9095 1/1 2m ago 19m vm10=a;count:1 
2026-03-09T00:15:15.587 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo ?:8000 2/2 2m ago 18m count:2 2026-03-09T00:15:15.647 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T00:15:16.175 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {}, 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:15:16.176 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:15:16.233 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr' 2026-03-09T00:15:16.419 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:16 vm04 ceph-mon[46823]: from='client.25066 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:16.419 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:16 vm04 ceph-mon[51053]: from='client.25066 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:16 vm10 ceph-mon[48982]: from='client.25066 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:16.985Z caller=dispatch.go:352 level=error 
component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:17.009Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:17 vm04 ceph-mon[51053]: from='client.15156 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:17 vm04 ceph-mon[51053]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:17 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/4029313852' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:17 vm04 ceph-mon[46823]: from='client.15156 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:17 vm04 ceph-mon[46823]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:17 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/4029313852' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:17 vm10 ceph-mon[48982]: from='client.15156 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:17 vm10 ceph-mon[48982]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:17 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/4029313852' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:18.415 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:18 vm04 ceph-mon[46823]: from='client.15168 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:18.415 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:18 vm04 ceph-mon[51053]: from='client.15168 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:18.517 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:18.575 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:15:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:18 vm10 ceph-mon[48982]: from='client.15168 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:19.114 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.531 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:19.531 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:19.532 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:19 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (12m) 2m ago 18m 26.0M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (11m) 2m ago 18m 48.4M - dad864ee21e9 9fb25843918b 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 2m ago 18m 69.3M - 3.5 e1d6a67b021e 27ffed7ff8e7 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283 running (14m) 2m ago 20m 544M - 19.2.3-678-ge911bdeb 654f31e6858e 2d7d59a967f3 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (9m) 2m ago 21m 485M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (21m) 2m ago 21m 71.8M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (20m) 2m ago 20m 50.4M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (20m) 2m ago 20m 50.4M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (12m) 2m ago 18m 9857k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (12m) 2m ago 18m 10.0M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (20m) 2m ago 20m 56.5M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:15:19.532 
INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (20m) 2m ago 20m 54.7M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (20m) 2m ago 20m 51.9M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (19m) 2m ago 19m 53.9M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (19m) 2m ago 19m 54.7M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (19m) 2m ago 19m 53.6M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (19m) 2m ago 19m 51.5M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (19m) 2m ago 19m 56.0M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 2m ago 18m 46.4M - 2.51.0 1d3b7f56885b 77372237e49c 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (18m) 2m ago 18m 97.7M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:15:19.532 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (18m) 2m ago 18m 95.6M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:19 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: }, 
2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {}, 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:15:19.772 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) mgr", 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:15:19.971 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:15:20.310 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[46823]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:20.310 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[46823]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:20.310 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[46823]: from='client.25072 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:20.310 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/1285598383' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:20.311 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[51053]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:20.311 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[51053]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:20.311 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[51053]: from='client.25072 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:20.311 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:20 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/1285598383' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:20 vm10 ceph-mon[48982]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:20 vm10 ceph-mon[48982]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:20 vm10 ceph-mon[48982]: from='client.25072 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:20 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/1285598383' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='client.25078 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='client.24971 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='client.25093 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T00:15:21.496 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mon[48982]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 
00:15:21 vm04 ceph-mon[51053]: from='client.25078 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: from='client.24971 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: from='client.25093 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[51053]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='client.25078 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='client.24971 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='client.25093 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='mgr.24836 192.168.123.110:0/2516654684' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T00:15:21.505 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:21 vm04 ceph-mon[46823]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:15:21.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:21 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:21.494+0000 7fb827536640 -1 mgr handle_mgr_map I was active but no longer am 2026-03-09T00:15:21.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:21 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ignoring --setuser ceph since I am not root 2026-03-09T00:15:21.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:21 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: ignoring --setgroup ceph since I am not root 2026-03-09T00:15:21.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:21 vm10 ceph-mgr[67808]: -- 192.168.123.110:0/1595290902 <== mon.2 v2:192.168.123.110:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x555e556774a0 con 0x555e55655400 2026-03-09T00:15:21.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:21 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:21.614+0000 7f550e822140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:15:21.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:21 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:21.659+0000 7f550e822140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:15:21.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:21] ENGINE Bus STOPPING 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:21] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:21 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:21] ENGINE Bus STOPPED 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:22] ENGINE Bus STARTING 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:22] ENGINE Serving on http://:::9283 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:22 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:22] ENGINE Bus STARTED 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:15:22.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: Failing over to other MGR 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.24836 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 
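At this point the log shows the first staggered step in action: the orchestrator decides it must upgrade the daemon it is currently running on ("Upgrade: Need to upgrade myself (mgr.x)"), fails over to the standby mgr.y, and the harness keeps waiting with the loop it dispatched at 00:15:18. A minimal, commented sketch of that polling pattern follows (same pattern as the recorded one-liner, run inside the cephadm shell; the trailing version check is an illustrative assumption, not output captured above):

    # Poll until the upgrade is no longer in progress or its status reports an Error.
    while ceph orch upgrade status | jq '.in_progress' | grep -q true \
          && ! ceph orch upgrade status | jq '.message' | grep -q Error; do
        ceph orch ps              # per-daemon version/image while waiting
        ceph versions             # cluster-wide version histogram (JSON)
        ceph orch upgrade status  # target_image / progress / message
        sleep 30
    done
    # Illustrative follow-up check: after the mgr-only pass, the "mgr" section of
    # `ceph versions` should list a single version string (the new build).
    ceph versions | jq -r '.mgr | keys[]'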
2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: mgrmap e36: y(active, starting, since 1.00089s) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: Manager daemon y is now available 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 
09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: Failing over to other MGR 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.24836 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: mgrmap e36: y(active, starting, since 1.00089s) 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: 
from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: Manager daemon y is now available 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:15:22.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: Upgrade: Need to upgrade myself (mgr.x) 
2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: Failing over to other MGR 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.24836 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: mgrmap e36: y(active, starting, since 1.00089s) 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' 
entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:15:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:15:22.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: Manager daemon y is now available 2026-03-09T00:15:22.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:22.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:22.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:15:22.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:22 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:15:22.329 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:22.064+0000 7f550e822140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:22.407+0000 7f550e822140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: from numpy import show_config as show_numpy_config 2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:22.498+0000 7f550e822140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:22.545+0000 7f550e822140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:15:22.798 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:22 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:22.624+0000 7f550e822140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:15:23.386 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.175+0000 7f550e822140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:15:23.386 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.301+0000 7f550e822140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:15:23.386 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.347+0000 7f550e822140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:23 vm10 ceph-mon[48982]: mgrmap e37: y(active, since 2s) 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:23 vm10 ceph-mon[48982]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:23 vm10 ceph-mon[48982]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.384+0000 7f550e822140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.430+0000 7f550e822140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.475+0000 7f550e822140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:15:23.691 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.688+0000 7f550e822140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:15:23.691 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:23.599Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get 
\"http://192.168.123.110:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.110:8765: connect: connection refused" 2026-03-09T00:15:23.691 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:23.599Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.110:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.110:8765: connect: connection refused" 2026-03-09T00:15:23.691 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:23.599Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.110:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.110:8765: connect: connection refused" 2026-03-09T00:15:23.691 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:23.601Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.110:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.110:8765: connect: connection refused" 2026-03-09T00:15:23.691 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:23.601Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get \"http://192.168.123.110:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.110:8765: connect: connection refused" 2026-03-09T00:15:23.691 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:23.601Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.110:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.110:8765: connect: connection refused" 2026-03-09T00:15:23.692 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:23 vm04 ceph-mon[51053]: mgrmap e37: y(active, since 2s) 2026-03-09T00:15:23.692 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:23 vm04 ceph-mon[51053]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:23.692 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:23 vm04 ceph-mon[51053]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:23.692 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:23 vm04 ceph-mon[46823]: mgrmap e37: y(active, since 2s) 2026-03-09T00:15:23.693 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:23 vm04 ceph-mon[46823]: from='client.15114 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:23.693 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:23 vm04 ceph-mon[46823]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 
2026-03-09T00:15:24.003 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:23 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:23.749+0000 7f550e822140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:15:24.003 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.000+0000 7f550e822140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:15:24.325 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.324+0000 7f550e822140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:15:24.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.363+0000 7f550e822140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:15:24.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.407+0000 7f550e822140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:15:24.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.499+0000 7f550e822140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:15:24.578 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.549+0000 7f550e822140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:15:24.945 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: [09/Mar/2026:00:15:23] ENGINE Bus STARTING 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: [09/Mar/2026:00:15:23] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: [09/Mar/2026:00:15:23] ENGINE Client ('192.168.123.104', 46386) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: [09/Mar/2026:00:15:23] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: [09/Mar/2026:00:15:23] ENGINE Bus STARTED 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: 
from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:24 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.649+0000 7f550e822140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:15:24.946 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.778+0000 7f550e822140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:15:25.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: [09/Mar/2026:00:15:23] ENGINE Bus STARTING 2026-03-09T00:15:25.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: [09/Mar/2026:00:15:23] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:15:25.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: [09/Mar/2026:00:15:23] ENGINE Client ('192.168.123.104', 46386) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:15:25.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: [09/Mar/2026:00:15:23] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: [09/Mar/2026:00:15:23] ENGINE Bus STARTED 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 
192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: [09/Mar/2026:00:15:23] ENGINE Bus STARTING 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: [09/Mar/2026:00:15:23] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: [09/Mar/2026:00:15:23] ENGINE Client ('192.168.123.104', 46386) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: [09/Mar/2026:00:15:23] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: [09/Mar/2026:00:15:23] ENGINE Bus STARTED 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 
192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:25.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:24 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.944+0000 7f550e822140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:24.986+0000 7f550e822140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:15:24] ENGINE Bus STARTING 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: CherryPy Checker: 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: The Application mounted at '' has an empty config. 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:24 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:15:25] ENGINE Serving on http://:::9283 2026-03-09T00:15:25.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:25 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[67804]: [09/Mar/2026:00:15:25] ENGINE Bus STARTED 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:15:26.003+0000 7f89a1137640 -1 log_channel(cephadm) log [ERR] : cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 
2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Creating ceph-iscsi config... 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Traceback (most recent call last): 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return _run_code(code, main_globals, None, 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: exec(code, run_globals) 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File 
"/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Traceback (most recent call last): 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return self.wait_async( 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return future.result(timeout) 2026-03-09T00:15:26.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return self.__get_result() 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File 
"/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: raise self._exception 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: out, err, code = await self._run_cephadm( 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: raise OrchestratorError( 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Creating ceph-iscsi config... 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Traceback (most recent call last): 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: return _run_code(code, main_globals, None, 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: exec(code, run_globals) 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: mgrmap e38: y(active, since 4s) 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: Standby manager daemon x started 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.? 
192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:15:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: mgrmap e38: y(active, since 4s) 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:15:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:25 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: mgrmap e38: y(active, since 4s) 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.328 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: Standby manager daemon x started 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:15:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:25 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/1421146928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:15:26.619 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 systemd[1]: Stopping Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 
2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.617Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.618Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.622Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.622Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[76640]: ts=2026-03-09T00:15:26.622Z caller=main.go:1273 level=info msg="See you next time!" 
2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 podman[78739]: 2026-03-09 00:15:26.633230745 +0000 UTC m=+0.033749145 container died 77372237e49c99284c1c8f9fc08b5b4a999d6b9597bec34f37050d93162a9afd (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 podman[78739]: 2026-03-09 00:15:26.652691037 +0000 UTC m=+0.053209437 container remove 77372237e49c99284c1c8f9fc08b5b4a999d6b9597bec34f37050d93162a9afd (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 bash[78739]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service: Deactivated successfully. 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 systemd[1]: Stopped Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 systemd[1]: Starting Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 podman[78805]: 2026-03-09 00:15:26.831103633 +0000 UTC m=+0.021648658 container create 1f53121cfa7ff7ca2684785c9eb0af4653c0a215bfd7cb92bb60d540bc8d326a (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:15:26.872 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 podman[78805]: 2026-03-09 00:15:26.868543806 +0000 UTC m=+0.059088840 container init 1f53121cfa7ff7ca2684785c9eb0af4653c0a215bfd7cb92bb60d540bc8d326a (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: mgrmap e39: y(active, since 5s), standbys: x 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: Creating ceph-iscsi config... 
2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: exec(code, run_globals) 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.wait_async( 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: return future.result(timeout) 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: return self.__get_result() 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: raise self._exception 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:15:27.215 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: raise OrchestratorError( 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: Creating ceph-iscsi config... 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: exec(code, run_globals) 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: Reconfiguring prometheus.a (dependencies changed)... 
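[Annotation] The cephadm stderr relayed above shows the reconfig of iscsi.foo.vm04.fbyciv failing because systemctl restart of the daemon's unit exits non-zero, which the mgr then surfaces as an OrchestratorError in _check_daemons. A minimal triage sketch on vm04, limited to the commands the stderr itself points to plus the standard cephadm/jq tooling (assumes jq is installed on the host; the unit and daemon names are taken verbatim from the log above):
  # Inspect the failing iscsi unit named in the stderr
  systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service
  journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service
  # Confirm how cephadm reports the daemon's state on this host
  cephadm ls | jq '.[] | select(.name == "iscsi.foo.vm04.fbyciv")'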
2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:15:27.216 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:26 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 podman[78805]: 2026-03-09 00:15:26.871631314 +0000 UTC m=+0.062176339 container start 1f53121cfa7ff7ca2684785c9eb0af4653c0a215bfd7cb92bb60d540bc8d326a (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 bash[78805]: 1f53121cfa7ff7ca2684785c9eb0af4653c0a215bfd7cb92bb60d540bc8d326a 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 podman[78805]: 2026-03-09 00:15:26.822921981 +0000 UTC m=+0.013467015 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 systemd[1]: Started Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
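[Annotation] After prometheus.a is redeployed on vm10, the mgr refreshes the dashboard's Prometheus endpoint, as shown by the "dashboard get-prometheus-api-host" / "dashboard set-prometheus-api-host" dispatches above. A hedged sketch of checking or setting the same value by hand with the dashboard module commands seen being dispatched (the URL is the one recorded in the log):
  # Read the Prometheus endpoint currently configured for the dashboard
  ceph dashboard get-prometheus-api-host
  # Point the dashboard at the redeployed prometheus.a instance
  ceph dashboard set-prometheus-api-host http://vm10.local:9095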
2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.899Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.899Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.899Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm10 (none))" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.899Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.899Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.901Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.902Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.902Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.903Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.910Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.911Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=674.993µs 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.911Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.917Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=5 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.929Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=5 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.935Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=5 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.938Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=5 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.943Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=4 maxSegment=5 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.944Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=5 maxSegment=5 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.944Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=31.298µs wal_replay_duration=32.749372ms wbl_replay_duration=180ns total_replay_duration=33.605044ms 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.948Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.948Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: 
ts=2026-03-09T00:15:26.948Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.959Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=10.833898ms db_storage=1.092µs remote_storage=23.164µs web_handler=601ns query_engine=1.293µs scrape=730.036µs scrape_sd=143.288µs notify=19.116µs notify_sd=13.606µs rules=9.466658ms tracing=6.342µs 2026-03-09T00:15:27.216 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.959Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-09T00:15:27.217 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:15:26 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-prometheus-a[78816]: ts=2026-03-09T00:15:26.959Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-09T00:15:27.282 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:26.987Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:27.282 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:26.994Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: mgrmap e39: y(active, since 5s), standbys: x 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: Creating ceph-iscsi config... 
2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: exec(code, run_globals) 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.wait_async( 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: return future.result(timeout) 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: return self.__get_result() 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: raise self._exception 2026-03-09T00:15:27.283 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: raise OrchestratorError( 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: exec(code, run_globals) 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: Reconfiguring prometheus.a (dependencies changed)... 
2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:26] ENGINE Bus STOPPING 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:27] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:27] ENGINE Bus STOPPED 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:27] ENGINE Bus STARTING 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: mgrmap e39: y(active, since 5s), standbys: x 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 
2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: exec(code, run_globals) 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:27.284 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1145, in _check_daemons 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: self.mgr._daemon_action(daemon_spec, action=action) 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 2545, in _daemon_action 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.wait_async( 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/module.py", line 815, in wait_async 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.event_loop.get_result(coro, timeout) 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/ssh.py", line 136, in get_result 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: return future.result(timeout) 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 446, in result 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: return self.__get_result() 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/lib64/python3.9/concurrent/futures/_base.py", line 391, in __get_result 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: raise self._exception 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1381, in _create_daemon 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: out, err, code = await self._run_cephadm( 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/share/ceph/mgr/cephadm/serve.py", line 1724, in _run_cephadm 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: raise OrchestratorError( 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: orchestrator._interface.OrchestratorError: cephadm exited with an error code: 1, stderr: Reconfig daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: Creating ceph-iscsi config... 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: Non-zero exit code 1 from systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 
2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: Traceback (most recent call last): 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: return _run_code(code, main_globals, None, 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: exec(code, run_globals) 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1126, in deploy_daemon 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: RuntimeError: Failed command: systemctl restart ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout: See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details. 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: Reconfiguring prometheus.a (dependencies changed)... 
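[Annotation] mon.a, mon.b and mon.c each relay the same OrchestratorError to the cluster log, so the iscsi reconfig failure appears three times above; it is a single failure, not three. A hedged sketch of checking the orchestrator's view from any host with the admin keyring (standard ceph orch / ceph health commands; filtering by daemon type is an assumption about how one might narrow the output):
  # Overall cluster health, including any cephadm warning about the failed daemon
  ceph health detail
  # Orchestrator status for the iscsi daemons only
  ceph orch ps --daemon-type iscsi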
2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:15:27.285 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:26 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:27.600 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:27] ENGINE Serving on http://:::9283 2026-03-09T00:15:27.601 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:27] ENGINE Bus STARTED 2026-03-09T00:15:27.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 systemd[1]: Stopping Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: mgrmap e40: y(active, since 6s), standbys: x 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:15:28.154 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:27 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 podman[79059]: 2026-03-09 00:15:27.891279528 +0000 UTC m=+0.064403808 container died 2d7d59a967f30e363c77f798c9f618a193560dc44e86c19ca25014c48203ee86 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 podman[79059]: 2026-03-09 00:15:27.914733555 +0000 UTC m=+0.087857835 container remove 2d7d59a967f30e363c77f798c9f618a193560dc44e86c19ca25014c48203ee86 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 bash[79059]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service: Main 
process exited, code=exited, status=143/n/a 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service: Failed with result 'exit-code'. 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 systemd[1]: Stopped Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:15:28.155 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:27 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service: Consumed 20.020s CPU time. 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: mgrmap e40: y(active, since 6s), standbys: x 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: Reconfiguring daemon prometheus.a on vm10 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm10.local:9095"}]: dispatch 2026-03-09T00:15:28.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: mgrmap e40: y(active, since 6s), standbys: x 2026-03-09T00:15:28.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:28.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:15:28.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:15:28.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:27 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 systemd[1]: Starting Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 podman[79170]: 2026-03-09 00:15:28.277958403 +0000 UTC m=+0.021342716 container create c971ca6e9652b11aac0051a41c6d2921ffdf773feaf016b7dffa9e312686affc (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 podman[79170]: 2026-03-09 00:15:28.311569249 +0000 UTC m=+0.054953572 container init c971ca6e9652b11aac0051a41c6d2921ffdf773feaf016b7dffa9e312686affc (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 podman[79170]: 2026-03-09 00:15:28.322522268 +0000 UTC 
m=+0.065906591 container start c971ca6e9652b11aac0051a41c6d2921ffdf773feaf016b7dffa9e312686affc (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 bash[79170]: c971ca6e9652b11aac0051a41c6d2921ffdf773feaf016b7dffa9e312686affc 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 podman[79170]: 2026-03-09 00:15:28.268912323 +0000 UTC m=+0.012296646 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 systemd[1]: Started Ceph mgr.x for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:15:28.440 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:28.432+0000 7f000b32b140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:15:28.780 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:28 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:28.478+0000 7f000b32b140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:15:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:29 vm10 ceph-mon[48982]: Upgrade: Updating mgr.x 2026-03-09T00:15:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:29 vm10 ceph-mon[48982]: Deploying daemon mgr.x on vm10 2026-03-09T00:15:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:29 vm10 ceph-mon[48982]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:29 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:29 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:29.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:29 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:29.329 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:29.292+0000 7f000b32b140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[51053]: Upgrade: Updating mgr.x 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[51053]: Deploying daemon mgr.x on vm10 2026-03-09T00:15:29.350 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[51053]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[46823]: Upgrade: Updating mgr.x 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[46823]: Deploying daemon mgr.x on vm10 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[46823]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:29.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:29 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:29.749 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:29.647+0000 7f000b32b140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:15:29.749 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:15:29.749 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T00:15:29.749 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: from numpy import show_config as show_numpy_config 2026-03-09T00:15:30.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:29.752+0000 7f000b32b140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:15:30.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:29.794+0000 7f000b32b140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:15:30.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:29 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:29.884+0000 7f000b32b140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:15:30.676 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.409+0000 7f000b32b140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:15:30.676 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.527+0000 7f000b32b140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:15:30.676 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.576+0000 7f000b32b140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:15:30.676 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.630+0000 7f000b32b140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:15:30.965 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:30 vm10 ceph-mon[48982]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:15:30.965 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:30 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:30.966 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:30 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:30.966 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:30 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:30.966 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:30 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:30.966 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.672+0000 7f000b32b140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:15:30.966 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.724+0000 7f000b32b140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:15:30.966 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.913+0000 7f000b32b140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:15:31.328 
INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:30 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:30.964+0000 7f000b32b140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:15:31.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.188+0000 7f000b32b140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[51053]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[46823]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:30 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:31.657 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.487+0000 7f000b32b140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:15:31.657 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.525+0000 7f000b32b140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:15:31.657 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.569+0000 7f000b32b140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:15:31.657 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.655+0000 7f000b32b140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:15:32.047 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.699+0000 7f000b32b140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:15:32.047 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 
2026-03-09T00:15:31.786+0000 7f000b32b140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:15:32.047 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:31.903+0000 7f000b32b140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:32.045+0000 7f000b32b140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:32.084+0000 7f000b32b140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: [09/Mar/2026:00:15:32] ENGINE Bus STARTING 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: CherryPy Checker: 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: The Application mounted at '' has an empty config. 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: [09/Mar/2026:00:15:32] ENGINE Serving on http://:::9283 2026-03-09T00:15:32.328 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:15:32 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: [09/Mar/2026:00:15:32] ENGINE Bus STARTED 2026-03-09T00:15:32.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:32.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:32.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: Standby manager daemon x restarted 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: Standby manager daemon x started 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 
vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:32 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: Standby manager daemon x restarted 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 
vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: Reconfiguring iscsi.foo.vm04.fbyciv (dependencies changed)... 
2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: Reconfiguring daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: Standby manager daemon x restarted 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: Standby manager daemon x started 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.? 192.168.123.110:0/2234184148' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 
vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:32.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:32 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all mgr 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all crash 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all mds 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all nfs 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: mgrmap e41: y(active, since 12s), standbys: x 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/1992634387' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Upgrade: Updating grafana.a 2026-03-09T00:15:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:33 vm10 ceph-mon[48982]: Deploying daemon grafana.a on vm10 2026-03-09T00:15:33.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all mgr 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all crash 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all mds 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all nfs 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: mgrmap e41: y(active, since 12s), standbys: x 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1992634387' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Upgrade: Updating grafana.a 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[51053]: Deploying daemon grafana.a on vm10 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all mgr 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all crash 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all mds 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all nfs 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: mgrmap e41: y(active, since 12s), standbys: x 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/1992634387' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Upgrade: Updating grafana.a 2026-03-09T00:15:33.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:33 vm04 ceph-mon[46823]: Deploying daemon grafana.a on vm10 2026-03-09T00:15:34.762 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:34 vm04 ceph-mon[51053]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:15:34.762 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:34 vm04 ceph-mon[46823]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:15:34.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:34 vm10 ceph-mon[48982]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:15:35.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:15:35] "GET /metrics HTTP/1.1" 200 34543 "" "Prometheus/2.51.0" 2026-03-09T00:15:36.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:36 vm10 ceph-mon[48982]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:36.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:36 vm04 ceph-mon[51053]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:36.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:36 vm04 ceph-mon[46823]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:36.994Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:36.995Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:37.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:37 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:37.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:37 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:37.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:37 vm04 ceph-mon[46823]: from='mgr.25000 
192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:39.117 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:39 vm10 ceph-mon[48982]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:38 vm10 systemd[1]: Stopping Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[70846]: t=2026-03-09T00:15:38+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:38 vm10 podman[80397]: 2026-03-09 00:15:38.937532037 +0000 UTC m=+0.027925426 container died 9fb25843918bddd8da0697768c2a38bac3ffe45e5b4ab57da26320250e1a5465 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, distribution-scope=public, vcs-type=git, version=8.5, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, release=236.1648460182, build-date=2022-03-28T10:36:18.413762, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base rhel8, vendor=Red Hat, Inc., description=Ceph Grafana Container, io.openshift.expose-services=, architecture=x86_64, name=ubi8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Grafana Container configured for Ceph mgr/dashboard integration, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, com.redhat.component=ubi8-container, io.buildah.version=1.24.2) 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:38 vm10 podman[80397]: 2026-03-09 00:15:38.962814736 +0000 UTC m=+0.053208134 container remove 9fb25843918bddd8da0697768c2a38bac3ffe45e5b4ab57da26320250e1a5465 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, version=8.5, name=ubi8, architecture=x86_64, distribution-scope=public, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=Red Hat Universal Base Image 8, vendor=Red Hat, Inc., vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, maintainer=Paul Cuzner , summary=Grafana Container configured for Ceph mgr/dashboard integration, com.redhat.component=ubi8-container, io.openshift.expose-services=, io.openshift.tags=base rhel8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, build-date=2022-03-28T10:36:18.413762, description=Ceph Grafana Container, vcs-type=git, release=236.1648460182, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.24.2) 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:38 vm10 bash[80397]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service: Deactivated successfully. 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 systemd[1]: Stopped Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:15:39.117 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service: Consumed 2.096s CPU time. 2026-03-09T00:15:39.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:39 vm04 ceph-mon[51053]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:39.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:39 vm04 ceph-mon[46823]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 systemd[1]: Starting Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 podman[80506]: 2026-03-09 00:15:39.312354652 +0000 UTC m=+0.021411184 container create aa7f793dcb8e345e7f6e34e7964aa0fac96c4950dac46070e35e16b19fd8c446 (image=quay.io/ceph/grafana:10.4.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, maintainer=Grafana Labs ) 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 podman[80506]: 2026-03-09 00:15:39.348821874 +0000 UTC m=+0.057878406 container init aa7f793dcb8e345e7f6e34e7964aa0fac96c4950dac46070e35e16b19fd8c446 (image=quay.io/ceph/grafana:10.4.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, maintainer=Grafana Labs ) 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 podman[80506]: 2026-03-09 00:15:39.359275629 +0000 UTC m=+0.068332162 container start aa7f793dcb8e345e7f6e34e7964aa0fac96c4950dac46070e35e16b19fd8c446 (image=quay.io/ceph/grafana:10.4.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, maintainer=Grafana Labs ) 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 bash[80506]: aa7f793dcb8e345e7f6e34e7964aa0fac96c4950dac46070e35e16b19fd8c446 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 podman[80506]: 2026-03-09 00:15:39.30404488 +0000 UTC m=+0.013101412 image pull c8b91775d855b99270fc5d22f3c6737e8cca01ef4c25c8b0362295e0746fa39b quay.io/ceph/grafana:10.4.0 2026-03-09T00:15:39.456 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 systemd[1]: Started Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.45574012Z level=info msg="Starting Grafana" version=10.4.0 commit=03f502a94d17f7dc4e6c34acdf8428aedd986e4c branch=HEAD compiled=2026-03-09T00:15:39Z 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457185987Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457252752Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457298428Z level=info msg="Config overridden from command line" arg="default.paths.data=/var/lib/grafana" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457335838Z level=info msg="Config overridden from command line" arg="default.paths.logs=/var/log/grafana" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457371344Z level=info msg="Config overridden from command line" arg="default.paths.plugins=/var/lib/grafana/plugins" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457408884Z level=info msg="Config overridden from command line" arg="default.paths.provisioning=/etc/grafana/provisioning" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457443509Z level=info msg="Config overridden from command line" arg="default.log.mode=console" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457491118Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457529981Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457563384Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457601224Z level=info msg="Config overridden from Environment variable" 
var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457639165Z level=info msg=Target target=[all] 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.4576789Z level=info msg="Path Home" path=/usr/share/grafana 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457713414Z level=info msg="Path Data" path=/var/lib/grafana 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457746355Z level=info msg="Path Logs" path=/var/log/grafana 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457779157Z level=info msg="Path Plugins" path=/var/lib/grafana/plugins 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457812078Z level=info msg="Path Provisioning" path=/etc/grafana/provisioning 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=settings t=2026-03-09T00:15:39.457845421Z level=info msg="App mode production" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=sqlstore t=2026-03-09T00:15:39.458390592Z level=info msg="Connecting to DB" dbtype=sqlite3 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=sqlstore t=2026-03-09T00:15:39.458486311Z level=warn msg="SQLite database file has broader permissions than it should" path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.459207491Z level=info msg="Starting DB migrations" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.465231235Z level=info msg="Executing migration" id="Update is_service_account column to nullable" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.495345356Z level=info msg="Migration successfully executed" id="Update is_service_account column to nullable" duration=30.104813ms 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.496436327Z level=info msg="Executing migration" id="Add uid column to user" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.499132264Z level=info msg="Migration successfully executed" id="Add uid column to user" duration=2.695325ms 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.499914297Z level=info msg="Executing migration" id="Update uid column values for users" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.500116415Z level=info msg="Migration successfully executed" id="Update uid column values for users" duration=202.719µs 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.500842875Z level=info msg="Executing migration" id="Add unique index user_uid" 2026-03-09T00:15:39.714 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.501446295Z level=info msg="Migration successfully executed" id="Add unique index user_uid" duration=604.551µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.502495238Z level=info msg="Executing migration" id="Add isPublic for dashboard" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.505398Z level=info msg="Migration successfully executed" id="Add isPublic for dashboard" duration=2.897061ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.506349932Z level=info msg="Executing migration" id="set service account foreign key to nil if 0" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.506544626Z level=info msg="Migration successfully executed" id="set service account foreign key to nil if 0" duration=194.374µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.507172893Z level=info msg="Executing migration" id="Add last_used_at to api_key table" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.509596188Z level=info msg="Migration successfully executed" id="Add last_used_at to api_key table" duration=2.422855ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.510259659Z level=info msg="Executing migration" id="Add is_revoked column to api_key table" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.512483191Z level=info msg="Migration successfully executed" id="Add is_revoked column to api_key table" duration=2.223231ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.513323914Z level=info msg="Executing migration" id="Add playlist column created_at" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.515532198Z level=info msg="Migration successfully executed" id="Add playlist column created_at" duration=2.207391ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.516164651Z level=info msg="Executing migration" id="Add playlist column updated_at" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.518410665Z level=info msg="Migration successfully executed" id="Add playlist column updated_at" duration=2.246996ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.519139228Z level=info msg="Executing migration" id="Add column preferences.json_data" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.521482273Z level=info msg="Migration successfully executed" id="Add column preferences.json_data" duration=2.338637ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.52217514Z level=info msg="Executing migration" id="alter preferences.json_data to mediumtext v1" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.522293252Z level=info msg="Migration successfully executed" id="alter preferences.json_data to mediumtext v1" duration=117.128µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.523010523Z level=info msg="Executing migration" id="Add preferences index org_id" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.523648357Z level=info msg="Migration successfully executed" id="Add preferences index org_id" duration=637.604µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.524393102Z level=info msg="Executing migration" id="Add preferences index user_id" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.526900795Z level=info msg="Migration successfully executed" id="Add 
preferences index user_id" duration=2.507443ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.529578977Z level=info msg="Executing migration" id="Increase tags column to length 4096" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.529792176Z level=info msg="Migration successfully executed" id="Increase tags column to length 4096" duration=212.799µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.530667925Z level=info msg="Executing migration" id="Add column uid in team" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.533626392Z level=info msg="Migration successfully executed" id="Add column uid in team" duration=2.954991ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.534649317Z level=info msg="Executing migration" id="Update uid column values in team" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.534895227Z level=info msg="Migration successfully executed" id="Update uid column values in team" duration=246.661µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.535521029Z level=info msg="Executing migration" id="Add unique index team_org_id_uid" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.536369556Z level=info msg="Migration successfully executed" id="Add unique index team_org_id_uid" duration=848.938µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.537245465Z level=info msg="Executing migration" id="Add OAuth ID token to user_auth" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.540147969Z level=info msg="Migration successfully executed" id="Add OAuth ID token to user_auth" duration=2.898064ms 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.546198292Z level=info msg="Executing migration" id="add index user_auth_token.revoked_at" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.547642354Z level=info msg="Migration successfully executed" id="add index user_auth_token.revoked_at" duration=1.443451ms 2026-03-09T00:15:39.715 
INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.54884212Z level=info msg="Executing migration" id="alter table short_url alter column created_by type to bigint" 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.548995476Z level=info msg="Migration successfully executed" id="alter table short_url alter column created_by type to bigint" duration=153.167µs 2026-03-09T00:15:39.715 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.549779134Z level=info msg="Executing migration" id="add current_reason column related to current_state" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.553111451Z level=info msg="Migration successfully executed" id="add current_reason column related to current_state" duration=3.327357ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.553995946Z level=info msg="Executing migration" id="add result_fingerprint column to alert_instance" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.558361867Z level=info msg="Migration successfully executed" id="add result_fingerprint column to alert_instance" duration=4.362345ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.559574196Z level=info msg="Executing migration" id="add rule_group_idx column to alert_rule" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.562482599Z level=info msg="Migration successfully executed" id="add rule_group_idx column to alert_rule" duration=2.904476ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.563501476Z level=info msg="Executing migration" id="add is_paused column to alert_rule table" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.567090323Z level=info msg="Migration successfully executed" id="add is_paused column to alert_rule table" duration=3.583667ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.56850498Z level=info msg="Executing migration" id="fix is_paused column for alert_rule table" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.568695888Z level=info msg="Migration successfully executed" id="fix is_paused 
column for alert_rule table" duration=192.852µs 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.569590963Z level=info msg="Executing migration" id="add rule_group_idx column to alert_rule_version" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.572253526Z level=info msg="Migration successfully executed" id="add rule_group_idx column to alert_rule_version" duration=2.658817ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.573140407Z level=info msg="Executing migration" id="add is_paused column to alert_rule_versions table" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.575756583Z level=info msg="Migration successfully executed" id="add is_paused column to alert_rule_versions table" duration=2.614854ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.576576548Z level=info msg="Executing migration" id="fix is_paused column for alert_rule_version table" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.576741326Z level=info msg="Migration successfully executed" id="fix is_paused column for alert_rule_version table" duration=166.02µs 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.577574545Z level=info msg="Executing migration" id="add configuration_hash column to alert_configuration" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.580283155Z level=info msg="Migration successfully executed" id="add configuration_hash column to alert_configuration" duration=2.706045ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.58114061Z level=info msg="Executing migration" id="add column send_alerts_to in ngalert_configuration" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.583554307Z level=info msg="Migration successfully executed" id="add column send_alerts_to in ngalert_configuration" duration=2.410411ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.584793106Z level=info msg="Executing migration" id="create provenance_type table" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.58534633Z level=info msg="Migration successfully executed" id="create provenance_type table" duration=552.163µs 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.586194048Z level=info msg="Executing migration" id="add index to uniquify (record_key, record_type, org_id) columns" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.588477081Z level=info msg="Migration successfully executed" id="add index to uniquify (record_key, record_type, org_id) columns" duration=2.281038ms 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.589805547Z level=info msg="Executing migration" id="create alert_image table" 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.590410629Z level=info msg="Migration successfully executed" id="create alert_image table" duration=607.397µs 2026-03-09T00:15:39.716 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.591230724Z level=info msg="Executing migration" id="add unique index on token to alert_image table" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.591863218Z level=info msg="Migration successfully executed" id="add unique index on token to alert_image table" duration=634.277µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.592652936Z level=info msg="Executing migration" id="support longer URLs in alert_image table" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.592764475Z level=info msg="Migration successfully executed" id="support longer URLs in alert_image table" duration=111.408µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.59337141Z level=info msg="Executing migration" id=create_alert_configuration_history_table 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.594151891Z level=info msg="Migration successfully executed" id=create_alert_configuration_history_table duration=780.522µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.595304859Z level=info msg="Executing migration" id="drop non-unique orgID index on alert_configuration" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: 
logger=migrator t=2026-03-09T00:15:39.595894223Z level=info msg="Migration successfully executed" id="drop non-unique orgID index on alert_configuration" duration=588.11µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.596588942Z level=info msg="Executing migration" id="drop unique orgID index on alert_configuration if exists" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.596841905Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop unique orgID index on alert_configuration if exists" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.597583313Z level=info msg="Executing migration" id="extract alertmanager configuration history to separate table" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.598201651Z level=info msg="Migration successfully executed" id="extract alertmanager configuration history to separate table" duration=618.527µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.59884816Z level=info msg="Executing migration" id="add unique index on orgID to alert_configuration" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.599458543Z level=info msg="Migration successfully executed" id="add unique index on orgID to alert_configuration" duration=610.554µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.60011966Z level=info msg="Executing migration" id="add last_applied column to alert_configuration_history" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.602499264Z level=info msg="Migration successfully executed" id="add last_applied column to alert_configuration_history" duration=2.378642ms 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.603246853Z level=info msg="Executing migration" id="increase max description length to 2048" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.603338845Z level=info msg="Migration successfully executed" id="increase max description length to 2048" duration=91.932µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.603991858Z level=info msg="Executing migration" id="alter library_element model to mediumtext" 
2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.604095922Z level=info msg="Migration successfully executed" id="alter library_element model to mediumtext" duration=104.325µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.604772007Z level=info msg="Executing migration" id="create secrets table" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.605301549Z level=info msg="Migration successfully executed" id="create secrets table" duration=530.433µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.606084094Z level=info msg="Executing migration" id="rename data_keys name column to id" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.621747839Z level=info msg="Migration successfully executed" id="rename data_keys name column to id" duration=15.658536ms 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.622942626Z level=info msg="Executing migration" id="add name column into data_keys" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.625975222Z level=info msg="Migration successfully executed" id="add name column into data_keys" duration=3.018109ms 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.626822628Z level=info msg="Executing migration" id="copy data_keys id column values into name" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.627060082Z level=info msg="Migration successfully executed" id="copy data_keys id column values into name" duration=238.006µs 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.629893705Z level=info msg="Executing migration" id="rename data_keys name column to label" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.644083934Z level=info msg="Migration successfully executed" id="rename data_keys name column to label" duration=14.18555ms 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.64620302Z level=info msg="Executing migration" id="rename data_keys id column back to name" 2026-03-09T00:15:39.717 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 
00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.657731236Z level=info msg="Migration successfully executed" id="rename data_keys id column back to name" duration=11.523638ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.658667458Z level=info msg="Executing migration" id="add column hidden to role table" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.661394742Z level=info msg="Migration successfully executed" id="add column hidden to role table" duration=2.723516ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.662331867Z level=info msg="Executing migration" id="permission kind migration" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.664916223Z level=info msg="Migration successfully executed" id="permission kind migration" duration=2.582183ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.665741929Z level=info msg="Executing migration" id="permission attribute migration" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.668189349Z level=info msg="Migration successfully executed" id="permission attribute migration" duration=2.445617ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.669128627Z level=info msg="Executing migration" id="permission identifier migration" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.671784066Z level=info msg="Migration successfully executed" id="permission identifier migration" duration=2.653876ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.672633657Z level=info msg="Executing migration" id="add permission identifier index" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.673318238Z level=info msg="Migration successfully executed" id="add permission identifier index" duration=685.093µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.674109659Z level=info msg="Executing migration" id="add permission action scope role_id index" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.6747347Z level=info msg="Migration successfully executed" id="add permission action scope role_id index" duration=625.421µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.675593427Z level=info msg="Executing migration" id="remove permission role_id action scope index" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.676244365Z level=info msg="Migration successfully executed" id="remove permission role_id action scope index" duration=650.857µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.676935038Z level=info msg="Executing migration" id="create query_history table v1" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.677574033Z level=info msg="Migration successfully executed" id="create query_history table v1" duration=626.081µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.678297428Z level=info msg="Executing migration" id="add index query_history.org_id-created_by-datasource_uid" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.679005783Z level=info msg="Migration successfully executed" id="add index query_history.org_id-created_by-datasource_uid" duration=708.196µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.679679764Z level=info msg="Executing migration" id="alter table query_history alter column created_by type to bigint" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.679781745Z level=info msg="Migration successfully executed" id="alter table query_history alter column created_by type to bigint" duration=102.572µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.68067634Z level=info msg="Executing migration" id="rbac disabled migrator" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.680765777Z level=info msg="Migration successfully executed" id="rbac disabled migrator" duration=89.699µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.681637268Z level=info msg="Executing migration" id="teams permissions migration" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: 
logger=migrator t=2026-03-09T00:15:39.6820232Z level=info msg="Migration successfully executed" id="teams permissions migration" duration=386.142µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.682819812Z level=info msg="Executing migration" id="dashboard permissions" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.685080011Z level=info msg="Migration successfully executed" id="dashboard permissions" duration=2.25994ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.68594457Z level=info msg="Executing migration" id="dashboard permissions uid scopes" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.687493007Z level=info msg="Migration successfully executed" id="dashboard permissions uid scopes" duration=1.548588ms 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.688074166Z level=info msg="Executing migration" id="drop managed folder create actions" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.688261967Z level=info msg="Migration successfully executed" id="drop managed folder create actions" duration=186.69µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.68927883Z level=info msg="Executing migration" id="alerting notification permissions" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.6896368Z level=info msg="Migration successfully executed" id="alerting notification permissions" duration=358.02µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.690339887Z level=info msg="Executing migration" id="create query_history_star table v1" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.69087603Z level=info msg="Migration successfully executed" id="create query_history_star table v1" duration=536.183µs 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.692437601Z level=info msg="Executing migration" id="add index query_history.user_id-query_uid" 2026-03-09T00:15:39.718 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.693073853Z level=info msg="Migration successfully executed" id="add index 
query_history.user_id-query_uid" duration=634.458µs 2026-03-09T00:15:39.719 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.696074008Z level=info msg="Executing migration" id="add column org_id in query_history_star" 2026-03-09T00:15:39.719 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.698763812Z level=info msg="Migration successfully executed" id="add column org_id in query_history_star" duration=2.689182ms 2026-03-09T00:15:39.719 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.700246517Z level=info msg="Executing migration" id="alter table query_history_star_mig column user_id type to bigint" 2026-03-09T00:15:39.719 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.70038695Z level=info msg="Migration successfully executed" id="alter table query_history_star_mig column user_id type to bigint" duration=142.547µs 2026-03-09T00:15:39.719 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.700992613Z level=info msg="Executing migration" id="create correlation table v1" 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.701558031Z level=info msg="Migration successfully executed" id="create correlation table v1" duration=565.177µs 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.702396872Z level=info msg="Executing migration" id="add index correlations.uid" 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.70302646Z level=info msg="Migration successfully executed" id="add index correlations.uid" duration=629.548µs 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.703831807Z level=info msg="Executing migration" id="add index correlations.source_uid" 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.7044053Z level=info msg="Migration successfully executed" id="add index correlations.source_uid" duration=573.684µs 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.705252366Z level=info msg="Executing migration" id="add correlation config column" 2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.707936189Z level=info msg="Migration successfully executed" id="add correlation config column" duration=2.682571ms 
2026-03-09T00:15:39.720 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.718437653Z level=info msg="Executing migration" id="drop index IDX_correlation_uid - v1" 2026-03-09T00:15:39.721 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.719364388Z level=info msg="Migration successfully executed" id="drop index IDX_correlation_uid - v1" duration=928.698µs 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.720082312Z level=info msg="Executing migration" id="drop index IDX_correlation_source_uid - v1" 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.720719003Z level=info msg="Migration successfully executed" id="drop index IDX_correlation_source_uid - v1" duration=636.972µs 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.721335868Z level=info msg="Executing migration" id="Rename table correlation to correlation_tmp_qwerty - v1" 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.729245622Z level=info msg="Migration successfully executed" id="Rename table correlation to correlation_tmp_qwerty - v1" duration=7.905957ms 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.730818275Z level=info msg="Executing migration" id="create correlation v2" 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.731709603Z level=info msg="Migration successfully executed" id="create correlation v2" duration=891.98µs 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.732595071Z level=info msg="Executing migration" id="create index IDX_correlation_uid - v2" 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.73334865Z level=info msg="Migration successfully executed" id="create index IDX_correlation_uid - v2" duration=753.559µs 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.734323194Z level=info msg="Executing migration" id="create index IDX_correlation_source_uid - v2" 2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.735005462Z level=info msg="Migration successfully executed" id="create index IDX_correlation_source_uid - v2" duration=682.237µs 
2026-03-09T00:15:39.975 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.735782766Z level=info msg="Executing migration" id="create index IDX_correlation_org_id - v2" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.736552707Z level=info msg="Migration successfully executed" id="create index IDX_correlation_org_id - v2" duration=769.862µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.737809138Z level=info msg="Executing migration" id="copy correlation v1 to v2" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.738066961Z level=info msg="Migration successfully executed" id="copy correlation v1 to v2" duration=257.973µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.738726706Z level=info msg="Executing migration" id="drop correlation_tmp_qwerty" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.739314557Z level=info msg="Migration successfully executed" id="drop correlation_tmp_qwerty" duration=589.684µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.739949324Z level=info msg="Executing migration" id="add provisioning column" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.743439877Z level=info msg="Migration successfully executed" id="add provisioning column" duration=3.488809ms 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.744361342Z level=info msg="Executing migration" id="create entity_events table" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.745547281Z level=info msg="Migration successfully executed" id="create entity_events table" duration=1.183695ms 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.746258804Z level=info msg="Executing migration" id="create dashboard public config v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.746952361Z level=info msg="Migration successfully executed" id="create dashboard public config v1" duration=693.357µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.747692507Z level=info msg="Executing migration" id="drop index UQE_dashboard_public_config_uid - v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.747978822Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop index UQE_dashboard_public_config_uid - v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.748595807Z level=info msg="Executing migration" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.748856686Z level=warn msg="Skipping migration: Already executed, but not recorded in migration log" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.74961768Z level=info msg="Executing migration" id="Drop old dashboard public config table" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.750185783Z level=info msg="Migration successfully executed" id="Drop old dashboard public config table" duration=567.181µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.750752504Z level=info msg="Executing migration" id="recreate dashboard public config v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.751421726Z level=info msg="Migration successfully executed" id="recreate dashboard public config v1" duration=669.462µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.752068827Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_uid - v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.752645947Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_uid - v1" duration=578.592µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.753421309Z level=info msg="Executing migration" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v1" 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.754083208Z level=info msg="Migration successfully executed" id="create index 
IDX_dashboard_public_config_org_id_dashboard_uid - v1" duration=660.677µs 2026-03-09T00:15:39.976 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.754810999Z level=info msg="Executing migration" id="drop index UQE_dashboard_public_config_uid - v2" 2026-03-09T00:15:39.977 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.755431211Z level=info msg="Migration successfully executed" id="drop index UQE_dashboard_public_config_uid - v2" duration=620.262µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.75607779Z level=info msg="Executing migration" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.756634192Z level=info msg="Migration successfully executed" id="drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2" duration=555.2µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.758939667Z level=info msg="Executing migration" id="Drop public config table" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.759443329Z level=info msg="Migration successfully executed" id="Drop public config table" duration=503.792µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.760075833Z level=info msg="Executing migration" id="Recreate dashboard public config v2" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.761729959Z level=info msg="Migration successfully executed" id="Recreate dashboard public config v2" duration=1.653765ms 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.762374245Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_uid - v2" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.762952948Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_uid - v2" duration=578.592µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.76360073Z level=info msg="Executing migration" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v2" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.764160056Z level=info msg="Migration successfully executed" id="create index IDX_dashboard_public_config_org_id_dashboard_uid - v2" duration=559.166µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.764705277Z level=info msg="Executing migration" id="create index UQE_dashboard_public_config_access_token - v2" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.765226763Z level=info msg="Migration successfully executed" id="create index UQE_dashboard_public_config_access_token - v2" duration=521.245µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.76587636Z level=info msg="Executing migration" id="Rename table dashboard_public_config to dashboard_public - v2" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.772821197Z level=info msg="Migration successfully executed" id="Rename table dashboard_public_config to dashboard_public - v2" duration=6.944176ms 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.773533881Z level=info msg="Executing migration" id="add annotations_enabled column" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.776177669Z level=info msg="Migration successfully executed" id="add annotations_enabled column" duration=2.643958ms 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.77688341Z level=info msg="Executing migration" id="add time_selection_enabled column" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.779791332Z level=info msg="Migration successfully executed" id="add time_selection_enabled column" duration=2.908042ms 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.780443012Z level=info msg="Executing migration" id="delete orphaned public dashboards" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.780634711Z level=info msg="Migration successfully executed" id="delete orphaned public dashboards" duration=192.792µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.781326926Z level=info msg="Executing migration" id="add share column" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.78403257Z level=info msg="Migration successfully executed" id="add share column" duration=2.705344ms 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.784574965Z level=info msg="Executing migration" id="backfill empty share column fields with default of public" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.784738711Z level=info msg="Migration successfully executed" id="backfill empty share column fields with default of public" duration=164.027µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.785342973Z level=info msg="Executing migration" id="create file table" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.785835484Z level=info msg="Migration successfully executed" id="create file table" duration=492.712µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.786515076Z level=info msg="Executing migration" id="file table idx: path natural pk" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.787060858Z level=info msg="Migration successfully executed" id="file table idx: path natural pk" duration=545.722µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.787702519Z level=info msg="Executing migration" id="file table idx: parent_folder_path_hash fast folder retrieval" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.78822151Z level=info msg="Migration successfully executed" id="file table idx: parent_folder_path_hash fast folder retrieval" duration=519.223µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.788785986Z level=info msg="Executing migration" id="create file_meta table" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.789189572Z level=info msg="Migration successfully executed" id="create file_meta table" duration=403.504µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.789850008Z level=info msg="Executing migration" id="file table idx: path key" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: 
logger=migrator t=2026-03-09T00:15:39.790366785Z level=info msg="Migration successfully executed" id="file table idx: path key" duration=516.767µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.791087023Z level=info msg="Executing migration" id="set path collation in file table" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.791184295Z level=info msg="Migration successfully executed" id="set path collation in file table" duration=97.642µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.79184356Z level=info msg="Executing migration" id="migrate contents column to mediumblob for MySQL" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.791936974Z level=info msg="Migration successfully executed" id="migrate contents column to mediumblob for MySQL" duration=93.855µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.792579356Z level=info msg="Executing migration" id="managed permissions migration" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.793954711Z level=info msg="Migration successfully executed" id="managed permissions migration" duration=1.375446ms 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.794671993Z level=info msg="Executing migration" id="managed folder permissions alert actions migration" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.79541286Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions migration" duration=740.867µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.795913466Z level=info msg="Executing migration" id="RBAC action name migrator" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.79667396Z level=info msg="Migration successfully executed" id="RBAC action name migrator" duration=760.454µs 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.797334306Z level=info msg="Executing migration" id="Add UID column to playlist" 2026-03-09T00:15:39.978 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.79998657Z level=info msg="Migration 
successfully executed" id="Add UID column to playlist" duration=2.651873ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.800599787Z level=info msg="Executing migration" id="Update uid column values in playlist" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.80074535Z level=info msg="Migration successfully executed" id="Update uid column values in playlist" duration=145.754µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.801360651Z level=info msg="Executing migration" id="Add index for uid in playlist" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.801936189Z level=info msg="Migration successfully executed" id="Add index for uid in playlist" duration=575.517µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.802906314Z level=info msg="Executing migration" id="update group index for alert rules" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.803195506Z level=info msg="Migration successfully executed" id="update group index for alert rules" duration=289.362µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.803760503Z level=info msg="Executing migration" id="managed folder permissions alert actions repeated migration" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.804283832Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions repeated migration" duration=523.47µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.804926595Z level=info msg="Executing migration" id="admin only folder/dashboard permission" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.80524437Z level=info msg="Migration successfully executed" id="admin only folder/dashboard permission" duration=317.786µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.805881382Z level=info msg="Executing migration" id="add action column to seed_assignment" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.808625829Z level=info msg="Migration successfully executed" id="add 
action column to seed_assignment" duration=2.744336ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.809169917Z level=info msg="Executing migration" id="add scope column to seed_assignment" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.811997388Z level=info msg="Migration successfully executed" id="add scope column to seed_assignment" duration=2.827282ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.812523484Z level=info msg="Executing migration" id="remove unique index builtin_role_role_name before nullable update" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.813044488Z level=info msg="Migration successfully executed" id="remove unique index builtin_role_role_name before nullable update" duration=521.215µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.813593206Z level=info msg="Executing migration" id="update seed_assignment role_name column to nullable" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.843161586Z level=info msg="Migration successfully executed" id="update seed_assignment role_name column to nullable" duration=29.564433ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.844157029Z level=info msg="Executing migration" id="add unique index builtin_role_name back" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.844766489Z level=info msg="Migration successfully executed" id="add unique index builtin_role_name back" duration=609.64µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.845308765Z level=info msg="Executing migration" id="add unique index builtin_role_action_scope" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.845891324Z level=info msg="Migration successfully executed" id="add unique index builtin_role_action_scope" duration=582.479µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.84661534Z level=info msg="Executing migration" id="add primary key to seed_assigment" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.859029484Z level=info 
msg="Migration successfully executed" id="add primary key to seed_assigment" duration=12.406009ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.860093816Z level=info msg="Executing migration" id="add origin column to seed_assignment" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.862918953Z level=info msg="Migration successfully executed" id="add origin column to seed_assignment" duration=2.824606ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.865564194Z level=info msg="Executing migration" id="add origin to plugin seed_assignment" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.866019917Z level=info msg="Migration successfully executed" id="add origin to plugin seed_assignment" duration=454.621µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.867003458Z level=info msg="Executing migration" id="prevent seeding OnCall access" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.867194566Z level=info msg="Migration successfully executed" id="prevent seeding OnCall access" duration=191.228µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.86784853Z level=info msg="Executing migration" id="managed folder permissions alert actions repeated fixed migration" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.868563938Z level=info msg="Migration successfully executed" id="managed folder permissions alert actions repeated fixed migration" duration=715.508µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.869212222Z level=info msg="Executing migration" id="managed folder permissions library panel actions migration" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.871384077Z level=info msg="Migration successfully executed" id="managed folder permissions library panel actions migration" duration=2.170973ms 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.872028132Z level=info msg="Executing migration" id="migrate external alertmanagers to datsourcse" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.872215973Z level=info msg="Migration successfully executed" id="migrate external alertmanagers to datsourcse" duration=188.321µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.872760272Z level=info msg="Executing migration" id="create folder table" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.873319168Z level=info msg="Migration successfully executed" id="create folder table" duration=558.785µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.873877724Z level=info msg="Executing migration" id="Add index for parent_uid" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.8744904Z level=info msg="Migration successfully executed" id="Add index for parent_uid" duration=612.816µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.875199397Z level=info msg="Executing migration" id="Add unique index for folder.uid and folder.org_id" 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.875746832Z level=info msg="Migration successfully executed" id="Add unique index for folder.uid and folder.org_id" duration=547.324µs 2026-03-09T00:15:39.979 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.876437634Z level=info msg="Executing migration" id="Update folder title length" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.876527843Z level=info msg="Migration successfully executed" id="Update folder title length" duration=90.759µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.87715619Z level=info msg="Executing migration" id="Add unique index for folder.title and folder.parent_uid" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.877742086Z level=info msg="Migration successfully executed" id="Add unique index for folder.title and folder.parent_uid" duration=584.393µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.878356617Z level=info msg="Executing migration" id="Remove unique index for folder.title and folder.parent_uid" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.878907577Z level=info 
msg="Migration successfully executed" id="Remove unique index for folder.title and folder.parent_uid" duration=550.81µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.879492973Z level=info msg="Executing migration" id="Add unique index for title, parent_uid, and org_id" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.880091123Z level=info msg="Migration successfully executed" id="Add unique index for title, parent_uid, and org_id" duration=598.11µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.880723556Z level=info msg="Executing migration" id="Sync dashboard and folder table" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.881093067Z level=info msg="Migration successfully executed" id="Sync dashboard and folder table" duration=369.571µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.881620315Z level=info msg="Executing migration" id="Remove ghost folders from the folder table" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.881859943Z level=info msg="Migration successfully executed" id="Remove ghost folders from the folder table" duration=239.668µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.882409381Z level=info msg="Executing migration" id="Remove unique index UQE_folder_uid_org_id" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.882956847Z level=info msg="Migration successfully executed" id="Remove unique index UQE_folder_uid_org_id" duration=547.396µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.883501816Z level=info msg="Executing migration" id="Add unique index UQE_folder_org_id_uid" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.884078726Z level=info msg="Migration successfully executed" id="Add unique index UQE_folder_org_id_uid" duration=577.111µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.884614197Z level=info msg="Executing migration" id="Remove unique index UQE_folder_title_parent_uid_org_id" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.885155852Z level=info msg="Migration successfully executed" id="Remove unique index UQE_folder_title_parent_uid_org_id" duration=541.724µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.885679702Z level=info msg="Executing migration" id="Add unique index UQE_folder_org_id_parent_uid_title" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.886252254Z level=info msg="Migration successfully executed" id="Add unique index UQE_folder_org_id_parent_uid_title" duration=570.92µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.886741049Z level=info msg="Executing migration" id="Remove index IDX_folder_parent_uid_org_id" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.887288463Z level=info msg="Migration successfully executed" id="Remove index IDX_folder_parent_uid_org_id" duration=548.877µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.887832111Z level=info msg="Executing migration" id="create anon_device table" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.888287062Z level=info msg="Migration successfully executed" id="create anon_device table" duration=455.162µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.888830028Z level=info msg="Executing migration" id="add unique index anon_device.device_id" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.889528246Z level=info msg="Migration successfully executed" id="add unique index anon_device.device_id" duration=698.276µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.890243724Z level=info msg="Executing migration" id="add index anon_device.updated_at" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.890802871Z level=info msg="Migration successfully executed" id="add index anon_device.updated_at" duration=559.197µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.89147571Z level=info msg="Executing migration" id="create signing_key table" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.891959316Z 
level=info msg="Migration successfully executed" id="create signing_key table" duration=483.546µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.892625382Z level=info msg="Executing migration" id="add unique index signing_key.key_id" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.893261172Z level=info msg="Migration successfully executed" id="add unique index signing_key.key_id" duration=635.82µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.894159814Z level=info msg="Executing migration" id="set legacy alert migration status in kvstore" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.894773732Z level=info msg="Migration successfully executed" id="set legacy alert migration status in kvstore" duration=614.3µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.895426815Z level=info msg="Executing migration" id="migrate record of created folders during legacy migration to kvstore" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.895607042Z level=info msg="Migration successfully executed" id="migrate record of created folders during legacy migration to kvstore" duration=180.718µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.896090226Z level=info msg="Executing migration" id="Add folder_uid for dashboard" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.898888794Z level=info msg="Migration successfully executed" id="Add folder_uid for dashboard" duration=2.796525ms 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.899503484Z level=info msg="Executing migration" id="Populate dashboard folder_uid column" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.900588274Z level=info msg="Migration successfully executed" id="Populate dashboard folder_uid column" duration=1.082947ms 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.901786006Z level=info msg="Executing migration" id="Add unique index for dashboard_org_id_folder_uid_title" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator 
t=2026-03-09T00:15:39.902354611Z level=info msg="Migration successfully executed" id="Add unique index for dashboard_org_id_folder_uid_title" duration=568.675µs 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.903453808Z level=info msg="Executing migration" id="Delete unique index for dashboard_org_id_folder_id_title" 2026-03-09T00:15:39.980 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.903911274Z level=info msg="Migration successfully executed" id="Delete unique index for dashboard_org_id_folder_id_title" duration=461.263µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.905461606Z level=info msg="Executing migration" id="Delete unique index for dashboard_org_id_folder_uid_title" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.905920054Z level=info msg="Migration successfully executed" id="Delete unique index for dashboard_org_id_folder_uid_title" duration=458.798µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.906412064Z level=info msg="Executing migration" id="Add unique index for dashboard_org_id_folder_uid_title_is_folder" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.90697057Z level=info msg="Migration successfully executed" id="Add unique index for dashboard_org_id_folder_uid_title_is_folder" duration=548.518µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.907408399Z level=info msg="Executing migration" id="Restore index for dashboard_org_id_folder_id_title" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.907866628Z level=info msg="Migration successfully executed" id="Restore index for dashboard_org_id_folder_id_title" duration=457.987µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.908330385Z level=info msg="Executing migration" id="create sso_setting table" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.908821804Z level=info msg="Migration successfully executed" id="create sso_setting table" duration=491.169µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.909324525Z level=info msg="Executing migration" id="copy kvstore migration status to each org" 2026-03-09T00:15:39.981 
INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.909777332Z level=info msg="Migration successfully executed" id="copy kvstore migration status to each org" duration=453.499µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.910260828Z level=info msg="Executing migration" id="add back entry for orgid=0 migrated status" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.910517589Z level=info msg="Migration successfully executed" id="add back entry for orgid=0 migrated status" duration=257.212µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.910919561Z level=info msg="Executing migration" id="alter kv_store.value to longtext" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.910944688Z level=info msg="Migration successfully executed" id="alter kv_store.value to longtext" duration=25.668µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.911465493Z level=info msg="Executing migration" id="add notification_settings column to alert_rule table" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.914323601Z level=info msg="Migration successfully executed" id="add notification_settings column to alert_rule table" duration=2.856175ms 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.91485657Z level=info msg="Executing migration" id="add notification_settings column to alert_rule_version table" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.917515555Z level=info msg="Migration successfully executed" id="add notification_settings column to alert_rule_version table" duration=2.658315ms 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.917952393Z level=info msg="Executing migration" id="removing scope from alert.instances:read action migration" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.918149062Z level=info msg="Migration successfully executed" id="removing scope from alert.instances:read action migration" duration=196.739µs 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=migrator t=2026-03-09T00:15:39.918596799Z level=info msg="migrations 
completed" performed=169 skipped=378 duration=453.412553ms 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=sqlstore t=2026-03-09T00:15:39.919065657Z level=info msg="Created default organization" 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=secrets t=2026-03-09T00:15:39.921351835Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=plugin.store t=2026-03-09T00:15:39.932890582Z level=info msg="Loading plugins..." 2026-03-09T00:15:39.981 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=local.finder t=2026-03-09T00:15:39.973552133Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=plugin.store t=2026-03-09T00:15:39.973570167Z level=info msg="Plugins loaded" count=55 duration=40.680656ms 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=query_data t=2026-03-09T00:15:39.977475616Z level=info msg="Query Service initialization" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=live.push_http t=2026-03-09T00:15:39.987731181Z level=info msg="Live Push Gateway initialization" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert.migration t=2026-03-09T00:15:39.988983545Z level=info msg=Starting 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert t=2026-03-09T00:15:39.992734454Z level=warn msg="Unexpected number of rows updating alert configuration history" rows=0 org=1 hash=not-yet-calculated 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert.state.manager t=2026-03-09T00:15:39.993620894Z level=info msg="Running in alternative execution of Error/NoData mode" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=infra.usagestats.collector t=2026-03-09T00:15:39.994653296Z level=info msg="registering usage stat providers" usageStatsProvidersLen=2 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=provisioning.datasources t=2026-03-09T00:15:39.996638141Z level=info msg="deleted datasource based on configuration" name=Dashboard1 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=provisioning.datasources t=2026-03-09T00:15:39.996895673Z level=info msg="inserting datasource from 
configuration" name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=provisioning.alerting t=2026-03-09T00:15:40.006760336Z level=info msg="starting to provision alerting" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=provisioning.alerting t=2026-03-09T00:15:40.006882465Z level=info msg="finished to provision alerting" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=http.server t=2026-03-09T00:15:40.008172509Z level=info msg="HTTP Server TLS settings" MinTLSVersion=TLS1.2 configuredciphers=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=http.server t=2026-03-09T00:15:40.008500963Z level=info msg="HTTP Server Listen" address=[::]:3000 protocol=https subUrl= socket= 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert.state.manager t=2026-03-09T00:15:40.008746012Z level=info msg="Warming state cache for startup" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert.state.manager t=2026-03-09T00:15:40.008952358Z level=info msg="State cache has been initialized" states=0 duration=205.875µs 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert.multiorg.alertmanager t=2026-03-09T00:15:40.009127396Z level=info msg="Starting MultiOrg Alertmanager" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ngalert.scheduler t=2026-03-09T00:15:40.009181036Z level=info msg="Starting scheduler" tickInterval=10s maxAttempts=1 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ticker t=2026-03-09T00:15:40.009243143Z level=info msg=starting first_tick=2026-03-09T00:15:50Z 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=provisioning.dashboard t=2026-03-09T00:15:40.009730575Z level=info msg="starting to provision dashboards" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=grafanaStorageLogger t=2026-03-09T00:15:40.011422301Z level=info msg="Storage starting" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=sqlstore.transactions 
t=2026-03-09T00:15:40.064389203Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=0 code="database is locked" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=sqlstore.transactions t=2026-03-09T00:15:40.099342742Z level=info msg="Database locked, sleeping then retrying" error="database is locked" retry=1 code="database is locked" 2026-03-09T00:15:40.242 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=plugins.update.checker t=2026-03-09T00:15:40.115485785Z level=info msg="Update check succeeded" duration=87.019173ms 2026-03-09T00:15:40.506 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:40 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:40.506 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:40 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:40.506 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:40 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:40.506 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:40 vm10 ceph-mon[48982]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:40.507 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=grafana-apiserver t=2026-03-09T00:15:40.241098781Z level=info msg="Adding GroupVersion playlist.grafana.app v0alpha1 to ResourceManager" 2026-03-09T00:15:40.507 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=grafana-apiserver t=2026-03-09T00:15:40.241560776Z level=info msg="Adding GroupVersion featuretoggle.grafana.app v0alpha1 to ResourceManager" 2026-03-09T00:15:40.507 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:15:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=provisioning.dashboard t=2026-03-09T00:15:40.268882551Z level=info msg="finished to provision dashboards" 2026-03-09T00:15:40.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:40.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:40.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:40.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[46823]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:40.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:40.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:40.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:40 vm04 
ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:40.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:40 vm04 ceph-mon[51053]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:15:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:41 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:41 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:41 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:41 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:41 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:41 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:41 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:42.636 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:42] ENGINE Bus STOPPING 2026-03-09T00:15:42.636 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:42 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:42] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:15:42.636 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:42] ENGINE Bus STOPPED 2026-03-09T00:15:42.636 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:42] ENGINE Bus STARTING 2026-03-09T00:15:43.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T00:15:43.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm10.local:3000"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: 
from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:15:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 
192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:15:43.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:42 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:43.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:42] ENGINE Serving on http://:::9283 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:15:42] ENGINE Bus STARTED 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' 
entity='mgr.y' 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm10.local:3000"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: 
dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:15:43.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:15:43.102 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm10.local:3000"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: 
from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "osd"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:15:43.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"mon"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:15:43.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:42 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm10.local:3000"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: Upgrade: Finalizing container_image settings 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: Upgrade: Complete! 
2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm10.local:3000"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: Upgrade: Finalizing container_image settings 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: Upgrade: Complete! 
2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:44.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:44 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm10.local:3000"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: Upgrade: Finalizing container_image settings 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: Upgrade: Complete! 
2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:44 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:45.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:15:45] "GET /metrics HTTP/1.1" 200 37556 "" "Prometheus/2.51.0" 2026-03-09T00:15:45.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:45 vm10 ceph-mon[48982]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T00:15:45.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:45 vm04 ceph-mon[51053]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T00:15:45.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:45 vm04 ceph-mon[46823]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T00:15:46.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:46 vm10 ceph-mon[48982]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:46.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:46 vm04 ceph-mon[51053]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:46.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:46 vm04 ceph-mon[46823]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:46.995Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" 
num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:47.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:46.996Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:48 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:48.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:48 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:48.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:48 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:49 vm10 ceph-mon[48982]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:49.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:49 vm04 ceph-mon[51053]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:49.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:49 vm04 ceph-mon[46823]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:50.229 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | length == 1'"'"'' 2026-03-09T00:15:50.799 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:15:50.932 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mgr | keys'"'"' | grep $sha1' 2026-03-09T00:15:51.079 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:51 vm04 ceph-mon[46823]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:51.079 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:51 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/786067479' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:51.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:51 vm04 ceph-mon[51053]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:51.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:51 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/786067479' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:51.459 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T00:15:51.492 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 2'"'"'' 2026-03-09T00:15:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:51 vm10 ceph-mon[48982]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:51 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/786067479' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.010 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:15:52.079 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 2'"'"'' 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[46823]: from='client.25135 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/2015395096' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/617602044' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[51053]: from='client.25135 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/2015395096' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/617602044' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.261 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:52 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:52 vm10 ceph-mon[48982]: from='client.25135 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:52 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/2015395096' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:52 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/617602044' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:52.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:52 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:15:53.091 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:53 vm04 ceph-mon[51053]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:53.091 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:53 vm04 ceph-mon[46823]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:53 vm10 ceph-mon[48982]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:54.225 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:15:54.280 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:15:54.305 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:54 vm04 ceph-mon[46823]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:54.305 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:54 vm04 ceph-mon[46823]: from='client.15261 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:54.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:54 vm04 ceph-mon[51053]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:54.305 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:54 vm04 ceph-mon[51053]: from='client.15261 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:54.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:54 vm10 ceph-mon[48982]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:15:54.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:15:54 vm10 ceph-mon[48982]: from='client.15261 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:15:54.749 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:15:54.827 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:15:55.058 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:15:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:15:55] "GET /metrics HTTP/1.1" 200 37553 "" "Prometheus/2.51.0" 2026-03-09T00:15:55.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:55 vm04 ceph-mon[46823]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:55.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:55 vm04 ceph-mon[51053]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:55.381 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:15:55.423 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '"'"'{print $2}'"'"')' 2026-03-09T00:15:55.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:55 vm10 ceph-mon[48982]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:15:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:56 vm10 ceph-mon[48982]: from='client.15267 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:56.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:56 vm10 ceph-mon[48982]: from='client.? 192.168.123.104:0/3461108613' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:15:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:56 vm04 ceph-mon[51053]: from='client.15267 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:56 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/3461108613' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:15:56.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:56 vm04 ceph-mon[46823]: from='client.15267 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:56.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:56 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/3461108613' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:15:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:57 vm04 ceph-mon[51053]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:57 vm04 ceph-mon[51053]: from='client.15279 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:57.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:57 vm04 ceph-mon[51053]: from='client.25171 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm10", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:56.995Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:15:56 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:15:56.996Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:15:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:57 vm04 ceph-mon[46823]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:57 vm04 ceph-mon[46823]: from='client.15279 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:57 vm04 ceph-mon[46823]: from='client.25171 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm10", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:57 vm10 ceph-mon[48982]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:57 vm10 ceph-mon[48982]: from='client.15279 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:57.578 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:57 vm10 ceph-mon[48982]: from='client.25171 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm10", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:57.653 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:57.712 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:15:58.224 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (12m) 17s ago 19m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (19s) 14s ago 19m 64.8M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (26s) 17s ago 18m 43.5M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (30s) 14s ago 21m 488M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (9m) 17s ago 21m 553M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (21m) 17s ago 21m 76.0M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (21m) 14s ago 21m 58.7M 2048M 17.2.0 e1d6a67b021e a4c3c4f2dde9 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (21m) 17s ago 21m 54.5M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (12m) 17s ago 19m 9957k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (12m) 14s ago 19m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (21m) 17s ago 21m 55.8M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (20m) 17s ago 20m 55.4M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (20m) 17s ago 20m 52.5M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (20m) 17s ago 20m 54.6M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (20m) 14s ago 20m 54.8M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 
2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (20m) 14s ago 20m 53.4M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (20m) 14s ago 20m 52.3M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (19m) 14s ago 19m 56.8M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (31s) 14s ago 19m 49.8M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (19m) 17s ago 19m 98.5M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:15:58.609 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (19m) 14s ago 19m 96.6M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:15:58.843 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:15:58.843 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:15:58.843 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T00:15:58.843 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:58.843 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:15:58.843 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {}, 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 13, 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:15:58.844 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[46823]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.044 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:58 vm04 ceph-mon[51053]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) mon on host(s) vm10", 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:15:59.044 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: pgmap v21: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:58 vm10 ceph-mon[48982]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:15:59.928 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='client.25177 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='client.25180 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='client.15300 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='client.? 
192.168.123.104:0/1225295252' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='client.25189 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T00:15:59.929 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:15:59 vm10 ceph-mon[48982]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-09T00:16:00.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='client.25177 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='client.25180 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='client.15300 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/1225295252' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='client.25189 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='client.25177 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='client.25180 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='client.15300 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='client.? 
192.168.123.104:0/1225295252' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='client.25189 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T00:16:00.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:15:59 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch 2026-03-09T00:16:00.200 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 systemd[1]: Stopping Ceph mon.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b[48978]: 2026-03-09T00:16:00.198+0000 7f8ac4d79700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b[48978]: 2026-03-09T00:16:00.198+0000 7f8ac4d79700 -1 mon.b@2(peon) e3 *** Got Signal Terminated *** 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 podman[81948]: 2026-03-09 00:16:00.281000555 +0000 UTC m=+0.094318236 container died a4c3c4f2dde9e60233cf8307ce2ead7965f12ed268bd8dd3532f36b59c8fee2f (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, vendor=Red Hat, Inc., release=754, version=8, CEPH_POINT_RELEASE=-17.2.0, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, distribution-scope=public, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.buildah.version=1.19.8, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, build-date=2022-05-03T08:36:31.336870, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, ceph=True, maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 podman[81948]: 2026-03-09 00:16:00.299539913 +0000 UTC m=+0.112857594 container remove a4c3c4f2dde9e60233cf8307ce2ead7965f12ed268bd8dd3532f36b59c8fee2f (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, release=754, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, io.openshift.expose-services=, name=centos-stream, GIT_BRANCH=HEAD, architecture=x86_64, distribution-scope=public, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com) 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 bash[81948]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.b.service: Deactivated successfully. 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 systemd[1]: Stopped Ceph mon.b for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.b.service: Consumed 12.745s CPU time. 2026-03-09T00:16:00.532 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 systemd[1]: Starting Ceph mon.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:16:00.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 podman[82061]: 2026-03-09 00:16:00.619578466 +0000 UTC m=+0.016719233 container create b102ade927dfdc95c6946ae58ddfa512f36fc7615c0075195eb14b3bbd47c54e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 podman[82061]: 2026-03-09 00:16:00.66233068 +0000 UTC m=+0.059471447 container init b102ade927dfdc95c6946ae58ddfa512f36fc7615c0075195eb14b3bbd47c54e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 podman[82061]: 2026-03-09 00:16:00.664851787 +0000 UTC m=+0.061992555 container start b102ade927dfdc95c6946ae58ddfa512f36fc7615c0075195eb14b3bbd47c54e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 bash[82061]: 
b102ade927dfdc95c6946ae58ddfa512f36fc7615c0075195eb14b3bbd47c54e 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 podman[82061]: 2026-03-09 00:16:00.612299802 +0000 UTC m=+0.009440579 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 systemd[1]: Started Ceph mon.b for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: pidfile_write: ignore empty --pid-file 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: load: jerasure load: lrc 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: RocksDB version: 7.9.2 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Git sha 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: DB SUMMARY 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: DB Session ID: I61B7O2L6EWIJV00RLQD 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: CURRENT file: CURRENT 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2076 Bytes 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000042.sst 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000040.log size: 0 ; 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.error_if_exists: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.create_if_missing: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.paranoid_checks: 1 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T00:16:00.830 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.env: 0x56325446fdc0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.info_log: 0x563255bd37e0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.statistics: (nil) 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.use_fsync: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_log_file_size: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.allow_fallocate: 1 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.use_direct_reads: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.db_log_dir: 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.wal_dir: 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T00:16:00.830 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T00:16:00.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.write_buffer_manager: 0x563255bd7900 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.unordered_write: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.row_cache: None 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.wal_filter: None 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.two_write_queues: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.wal_compression: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.atomic_flush: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: 
Options.avoid_unnecessary_blocking_io: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.log_readahead_size: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_background_jobs: 2 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_background_compactions: -1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_subcompactions: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_open_files: -1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: 
Options.wal_bytes_per_sync: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_background_flushes: -1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Compression algorithms supported: 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kZSTD supported: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kXpressCompression supported: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kBZip2Compression supported: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kLZ4Compression supported: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kZlibCompression supported: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: kSnappyCompression supported: 1 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.merge_operator: 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_filter: None 2026-03-09T00:16:00.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 
ceph-mon[82076]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x563255bd3440) 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: cache_index_and_filter_blocks: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: pin_top_level_index_and_filter: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: index_type: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: data_block_index_type: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: index_shortening: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: checksum: 4 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: no_block_cache: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_cache: 0x563255bf69b0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_cache_name: BinnedLRUCache 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_cache_options: 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: capacity : 536870912 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: num_shard_bits : 4 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: strict_capacity_limit : 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: high_pri_pool_ratio: 0.000 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_cache_compressed: (nil) 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: persistent_cache: (nil) 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_size: 4096 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_size_deviation: 10 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_restart_interval: 16 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: index_block_restart_interval: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: metadata_block_size: 4096 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: partition_filters: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: use_delta_encoding: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: filter_policy: bloomfilter 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: whole_key_filtering: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: verify_compression: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: read_amp_bytes_per_bit: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: format_version: 5 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: enable_index_compression: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: block_align: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: max_auto_readahead_size: 262144 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: prepopulate_block_cache: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: initial_auto_readahead_size: 8192 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 
vm10 ceph-mon[82076]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression: NoCompression 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.num_levels: 7 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T00:16:00.832 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:16:00.832 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: 
Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.inplace_update_support: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.bloom_locality: 0 2026-03-09T00:16:00.833 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.max_successive_merges: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.ttl: 2592000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.enable_blob_files: false 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.min_blob_size: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 42.sst 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 44, last_sequence is 23813, log_number is 40,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 40 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 91f3a0de-b18c-4e1e-8bfe-ee6d40c493ed 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015360689715, "job": 1, "event": "recovery_started", "wal_files": [40]} 2026-03-09T00:16:00.833 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #40 mode 2 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015360689744, "job": 1, "event": "recovery_finished"} 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/version_set.cc:5047] Creating manifest 46 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x563255bf8e00 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: rocksdb: DB pointer 0x563255d0a000 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: starting mon.b rank 2 at public addrs [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] at bind addrs [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???) 
e3 preinit fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mds e0 Unable to load 'last_metadata' 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mds e1 new map 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mds e1 print_map 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: e1 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: legacy client fscid: -1 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout: No filesystems configured 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).osd e96 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).paxosservice(auth 1..22) refresh upgraded, format 0 -> 3 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mgr e0 loading version 41 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mgr e41 active server: [v2:192.168.123.104:6800/1485512100,v1:192.168.123.104:6801/1485512100](25000) 2026-03-09T00:16:00.834 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:00 vm10 ceph-mon[82076]: mon.b@-1(???).mgr e41 mkfs or daemon transitioned to available, loading commands 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: mon.b calling monitor election 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: mon.c calling monitor election 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: mon.a calling monitor election 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:02.030 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: fsmap 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: mgrmap e41: y(active, since 40s), standbys: x 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: overall HEALTH_OK 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:02.030 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:01 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:02.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: mon.b calling monitor election 2026-03-09T00:16:02.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: mon.c calling monitor election 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: mon.a calling monitor election 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: fsmap 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: mgrmap e41: y(active, since 40s), standbys: x 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: mon.b calling monitor election 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: mon.c calling monitor election 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 
00:16:01 vm04 ceph-mon[46823]: mon.a calling monitor election 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: monmap e3: 3 mons at {a=[v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0],b=[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0],c=[v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0]} 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: fsmap 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: mgrmap e41: y(active, since 40s), standbys: x 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: overall HEALTH_OK 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:02.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:01 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:03.026 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:03 vm10 ceph-mon[82076]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:03.027 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:03 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.027 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:03 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.027 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:03 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.027 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:03 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[51053]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[46823]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:03.101 
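The entries above show the redeployed mon.b finishing RocksDB store recovery, calling a monitor election, and rejoining quorum with mons a and c (monmap e3, mgrmap e41, overall HEALTH_OK). A minimal shell sketch of how that state could be confirmed from a node with the admin keyring; these are standard ceph CLI calls, not commands recorded in this run:

    ceph quorum_status -f json | jq -r '.quorum_names[]'   # expect a, b, c listed
    ceph mon stat                                          # shows leader and quorum ranks
    ceph versions | jq '.mon'                              # per-version mon counts after the redeploy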
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:03.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:03 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", 
"who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 
2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 
2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 
ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": 
"mon"}]': finished 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 
2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.354 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.354 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:04 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 
ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.480 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: 
from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:16:04.481 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:04.481 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:04 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:16:05] "GET /metrics HTTP/1.1" 200 37553 "" "Prometheus/2.51.0" 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Detected new or changed devices on vm10 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all crash 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all mds 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 
ceph-mon[51053]: Upgrade: Setting container_image for all nfs 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all grafana 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all loki 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Setting container_image for all promtail 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Finalizing container_image settings 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: Upgrade: Complete! 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[51053]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Detected new or changed devices on vm10 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all crash 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all mds 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all nfs 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all grafana 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all 
loki 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Setting container_image for all promtail 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Finalizing container_image settings 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: Upgrade: Complete! 2026-03-09T00:16:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:05 vm04 ceph-mon[46823]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Detected new or changed devices on vm10 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all crash 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mds 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all nfs 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all grafana 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all loki 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all promtail 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Finalizing container_image settings 2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: Upgrade: Complete! 
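The "Upgrade: Setting container_image for all ..." and "Upgrade: Complete!" cluster-log lines mark cephadm finalizing image settings as this phase of the staggered upgrade wraps up. The harness cross-checks the same thing from the CLI shortly after; a minimal version of that check, using only commands that also appear later in this log:

    # confirm the orchestrator no longer reports an upgrade in flight
    ceph orch upgrade status | jq -e '.in_progress == false'
    # cross-check daemon versions and overall cluster health
    ceph versions
    ceph health detail
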
2026-03-09T00:16:05.487 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:05 vm10 ceph-mon[82076]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:06.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:05.699+0000 7f89a393c640 -1 mgr.server handle_report got status from non-daemon mon.b 2026-03-09T00:16:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:06.996Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:06 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:06.997Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:07 vm04 ceph-mon[51053]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:07 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:07 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:07 vm04 ceph-mon[46823]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:07 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:07 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:07 vm10 ceph-mon[82076]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:07 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:07 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:09 vm04 ceph-mon[51053]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:09 vm04 
ceph-mon[46823]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:09 vm10 ceph-mon[82076]: pgmap v26: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:10.328 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:16:10 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=infra.usagestats t=2026-03-09T00:16:10.013221164Z level=info msg="Usage stats are ready to report" 2026-03-09T00:16:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:11 vm10 ceph-mon[82076]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:11 vm04 ceph-mon[51053]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:11.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:11 vm04 ceph-mon[46823]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:13 vm10 ceph-mon[82076]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:13 vm04 ceph-mon[51053]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:13 vm04 ceph-mon[46823]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:14 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:14.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:14 vm04 ceph-mon[51053]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:14.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:14 vm04 ceph-mon[46823]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:15 vm04 ceph-mon[51053]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:15 vm04 ceph-mon[46823]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:15.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:16:15] "GET /metrics HTTP/1.1" 200 37561 "" "Prometheus/2.51.0" 2026-03-09T00:16:15.515 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:15 vm10 ceph-mon[82076]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:17 vm04 ceph-mon[51053]: pgmap v30: 161 pgs: 161 
active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:17.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:17 vm04 ceph-mon[46823]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:16.997Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:16.998Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:17 vm10 ceph-mon[82076]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:19 vm10 ceph-mon[82076]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:19.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:19 vm04 ceph-mon[51053]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:19.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:19 vm04 ceph-mon[46823]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:21 vm10 ceph-mon[82076]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:21 vm04 ceph-mon[51053]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:21 vm04 ceph-mon[46823]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:22.402 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:22 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:22.402 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:22 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:22.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:22 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:23.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:23 vm10 
ceph-mon[82076]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:23 vm04 ceph-mon[51053]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:23 vm04 ceph-mon[46823]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:24.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:24 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:24.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:24 vm04 ceph-mon[51053]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:24.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:24 vm04 ceph-mon[46823]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:25.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:16:25] "GET /metrics HTTP/1.1" 200 37563 "" "Prometheus/2.51.0" 2026-03-09T00:16:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:25 vm04 ceph-mon[51053]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:25.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:25 vm04 ceph-mon[46823]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:25 vm10 ceph-mon[82076]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:26.998Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:26 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:26.998Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:27 vm04 ceph-mon[51053]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:27 vm04 ceph-mon[46823]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
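The recurring alertmanager dispatcher errors above come from the ceph-dashboard webhook receiver being unreachable: host.containers.internal does not resolve on these VPS nodes, so every notify attempt fails the DNS lookup against 192.168.123.1:53 and the alerts keep being retried. A quick way to reproduce the failure from the affected host (hostname and port are taken from the error text; the curl flags are illustrative):

    # the name the alertmanager webhook target points at
    getent hosts host.containers.internal || echo "no such host"
    # the dashboard receiver endpoint it would post alerts to
    curl -vk https://host.containers.internal:8443/api/prometheus_receiver
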
2026-03-09T00:16:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:27 vm10 ceph-mon[82076]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:29.313 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:16:29.455 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:29 vm04 ceph-mon[51053]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:29.455 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:29 vm04 ceph-mon[46823]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:29 vm10 ceph-mon[82076]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (13m) 48s ago 19m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (50s) 27s ago 19m 69.6M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (57s) 48s ago 19m 43.5M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (61s) 27s ago 21m 488M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (10m) 48s ago 22m 553M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (22m) 48s ago 22m 76.0M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (29s) 27s ago 21m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (21m) 48s ago 21m 54.5M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (13m) 48s ago 20m 9957k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (13m) 27s ago 20m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (21m) 48s ago 21m 55.8M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (21m) 48s ago 21m 55.4M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (21m) 48s ago 21m 52.5M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (21m) 48s ago 21m 54.6M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:16:29.770 
INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (20m) 27s ago 20m 54.9M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (20m) 27s ago 20m 53.4M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (20m) 27s ago 20m 52.3M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (20m) 27s ago 20m 57.1M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (62s) 27s ago 19m 50.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (19m) 48s ago 19m 98.5M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:16:29.770 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (19m) 27s ago 19m 97.0M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:16:29.816 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | length == 2'"'"'' 2026-03-09T00:16:30.344 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:16:30.396 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:16:30.848 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:16:30.894 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:16:31.445 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[51053]: from='client.25195 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[51053]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[51053]: from='client.25201 -' entity='client.admin' cmd=[{"prefix": "orch 
ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/457930142' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[46823]: from='client.25195 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[46823]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[46823]: from='client.25201 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:31.446 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:31 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/457930142' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:31.499 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '"'"'{print $2}'"'"')' 2026-03-09T00:16:31.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:31 vm10 ceph-mon[82076]: from='client.25195 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:31.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:31 vm10 ceph-mon[82076]: pgmap v37: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:31.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:31 vm10 ceph-mon[82076]: from='client.25201 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:31.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:31 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/457930142' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:32 vm10 ceph-mon[82076]: from='client.25210 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:32 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/971586168' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:16:32.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:32 vm04 ceph-mon[51053]: from='client.25210 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:32 vm04 ceph-mon[51053]: from='client.? 
192.168.123.104:0/971586168' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:16:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:32 vm04 ceph-mon[46823]: from='client.25210 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:32 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/971586168' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:16:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:33 vm10 ceph-mon[82076]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:33 vm10 ceph-mon[82076]: from='client.34127 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:33.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:33 vm04 ceph-mon[46823]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:33.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:33 vm04 ceph-mon[46823]: from='client.34127 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:33.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:33 vm04 ceph-mon[51053]: pgmap v38: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:33.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:33 vm04 ceph-mon[51053]: from='client.34127 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:33.829 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:33.880 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:16:34.460 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='client.25219 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm04", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:34.461 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:34.461 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:34.461 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:34.461 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:34.461 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:34.461 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[46823]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='client.25219 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm04", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:34.462 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:34 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:34.462 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='client.25219 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": 
"quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "mon", "hosts": "vm04", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:34.479 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:34 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (13m) 54s ago 20m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (55s) 32s ago 19m 69.6M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (62s) 54s ago 19m 43.5M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (66s) 32s ago 21m 488M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (10m) 54s ago 22m 553M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (22m) 54s ago 22m 76.0M 2048M 17.2.0 e1d6a67b021e a0a441d060f5 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (34s) 32s ago 21m 20.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (21m) 54s ago 21m 54.5M 2048M 17.2.0 e1d6a67b021e 5c2d9165643c 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (13m) 54s ago 20m 9957k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (13m) 32s ago 20m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (21m) 54s ago 21m 55.8M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (21m) 54s ago 21m 55.4M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (21m) 54s ago 21m 52.5M 4096M 17.2.0 e1d6a67b021e 
a4ed5ecab7e4 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (21m) 54s ago 21m 54.6M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (21m) 32s ago 21m 54.9M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (20m) 32s ago 20m 53.4M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (20m) 32s ago 20m 52.3M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (20m) 32s ago 20m 57.1M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (68s) 32s ago 20m 50.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (19m) 54s ago 19m 98.5M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:16:34.851 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (19m) 32s ago 19m 97.0M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "mds": {}, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 12, 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:16:35.087 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:16:35.188 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:16:35] "GET /metrics HTTP/1.1" 200 37563 "" "Prometheus/2.51.0" 
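The `ceph versions` JSON above captures the mixed-version state midway through the staggered upgrade: two quincy mons and one squid mon, both mgrs already on squid, all OSDs and RGWs still on 17.2.0. A minimal sketch of how such output can be asserted with jq; the specific counts below are read off this snapshot and are illustrative, not the suite's own checks:

    # Hedged sketch: assert the expected mixed-version state from `ceph versions`.
    versions_json=$(ceph versions)
    echo "$versions_json" | jq -e '.mon | length == 2'      # two distinct mon versions while one mon is upgraded
    echo "$versions_json" | jq -e '.mgr | length == 1'      # mgrs already converged on a single version
    echo "$versions_json" | jq -e '.overall | length == 2'  # cluster runs exactly two builds overall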
2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) mon on host(s) vm04", 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:16:35.289 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: from='client.25225 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: from='client.15360 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:35 vm10 ceph-mon[82076]: from='client.? 
192.168.123.104:0/693322510' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: from='client.25225 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: from='client.15360 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[51053]: from='client.? 192.168.123.104:0/693322510' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: pgmap v39: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: from='client.25225 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: from='client.15360 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:16:35.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:35 vm04 ceph-mon[46823]: from='client.? 192.168.123.104:0/693322510' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 systemd[1]: Stopping Ceph mon.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
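The cephadm shell invocation earlier in this step runs a polling loop until the orchestrator reports the upgrade as finished or errored. Written out for readability, the loop amounts to the following; the 30-second poll interval comes from the test command itself, and the rest is a plain restatement of it:

    # Hedged restatement of the upgrade polling loop driven by the test:
    # keep dumping progress while the upgrade is in progress and no Error
    # string appears in the status message.
    while ceph orch upgrade status | jq '.in_progress' | grep -q true \
          && ! ceph orch upgrade status | jq '.message' | grep -q Error; do
        ceph orch ps
        ceph versions
        ceph orch upgrade status
        sleep 30
    done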
2026-03-09T00:16:36.915 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:36] ENGINE Bus STOPPING 2026-03-09T00:16:36.915 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a[46819]: 2026-03-09T00:16:36.629+0000 7f28ac753700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:16:36.915 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a[46819]: 2026-03-09T00:16:36.629+0000 7f28ac753700 -1 mon.a@0(leader) e3 *** Got Signal Terminated *** 2026-03-09T00:16:36.915 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 podman[94484]: 2026-03-09 00:16:36.855568297 +0000 UTC m=+0.238175877 container died a0a441d060f5b6c6eb3d2500745610b097b8e56bdaf461e6ad43f84dce8f45ac (image=quay.io/ceph/ceph:v17.2.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, name=centos-stream, release=754, io.buildah.version=1.19.8, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8, GIT_CLEAN=True) 2026-03-09T00:16:36.915 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 podman[94484]: 2026-03-09 00:16:36.871257651 +0000 UTC m=+0.253865231 container remove a0a441d060f5b6c6eb3d2500745610b097b8e56bdaf461e6ad43f84dce8f45ac (image=quay.io/ceph/ceph:v17.2.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a, CEPH_POINT_RELEASE=-17.2.0, ceph=True, RELEASE=HEAD, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, architecture=x86_64, version=8, GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., release=754, io.openshift.tags=base centos centos-stream, io.k8s.display-name=CentOS Stream 8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, com.redhat.component=centos-stream-container, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git) 2026-03-09T00:16:36.915 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 bash[94484]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a 2026-03-09T00:16:37.190 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:37.000Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:37.190 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:37.001Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:37.190 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:37] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:16:37.190 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:37] ENGINE Bus STOPPED 2026-03-09T00:16:37.190 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:37] ENGINE Bus STARTING 2026-03-09T00:16:37.190 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:37] ENGINE Serving on http://:::9283 2026-03-09T00:16:37.190 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:37] ENGINE Bus STARTED 2026-03-09T00:16:37.191 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service: Deactivated successfully. 2026-03-09T00:16:37.191 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 systemd[1]: Stopped Ceph mon.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
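At this point cephadm has stopped and removed mon.a's old v17.2.0 container, and the systemd unit is about to recreate it from the squid target image. One way to watch which image each mon actually runs is to filter the orchestrator's JSON output; the field names used below (daemon_type, daemon_id, version, container_image_name) are an assumption about the orch ps JSON schema rather than something verified against this exact build:

    # Hedged sketch: list mon daemons with the version and image they run.
    ceph orch ps --format json \
      | jq -r '.[] | select(.daemon_type == "mon")
               | "\(.daemon_id)\t\(.version)\t\(.container_image_name)"'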
2026-03-09T00:16:37.191 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:36 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service: Consumed 15.854s CPU time. 2026-03-09T00:16:37.191 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 systemd[1]: Starting Ceph mon.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 podman[94605]: 2026-03-09 00:16:37.190634937 +0000 UTC m=+0.018077987 container create 3a1ecb9ee7d191bb1845550c97a52695f4acc23fc6ceefc1d611b326653b12ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 podman[94605]: 2026-03-09 00:16:37.226385688 +0000 UTC m=+0.053828748 container init 3a1ecb9ee7d191bb1845550c97a52695f4acc23fc6ceefc1d611b326653b12ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0) 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 podman[94605]: 2026-03-09 00:16:37.229628287 +0000 UTC m=+0.057071337 container start 3a1ecb9ee7d191bb1845550c97a52695f4acc23fc6ceefc1d611b326653b12ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-a, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2) 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 bash[94605]: 
3a1ecb9ee7d191bb1845550c97a52695f4acc23fc6ceefc1d611b326653b12ef 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 podman[94605]: 2026-03-09 00:16:37.182856397 +0000 UTC m=+0.010299457 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 systemd[1]: Started Ceph mon.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: pidfile_write: ignore empty --pid-file 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: load: jerasure load: lrc 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: RocksDB version: 7.9.2 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Git sha 0 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: DB SUMMARY 2026-03-09T00:16:37.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: DB Session ID: 3IIG1DZDRR4CG5G2RCS2 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: CURRENT file: CURRENT 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: MANIFEST file: MANIFEST-000015 size: 2152 Bytes 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000048.sst 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000046.log size: 843904 ; 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.error_if_exists: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.create_if_missing: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.paranoid_checks: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T00:16:37.603 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.env: 0x559d13681dc0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.info_log: 0x559d152125c0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.statistics: (nil) 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.use_fsync: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_log_file_size: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.allow_fallocate: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.use_direct_reads: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.db_log_dir: 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.wal_dir: 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T00:16:37.603 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.write_buffer_manager: 0x559d15217900 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.unordered_write: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.row_cache: None 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.wal_filter: None 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.two_write_queues: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.wal_compression: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.atomic_flush: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: 
Options.avoid_unnecessary_blocking_io: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.log_readahead_size: 0 2026-03-09T00:16:37.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_background_jobs: 2 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_background_compactions: -1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_subcompactions: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_open_files: -1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: 
Options.wal_bytes_per_sync: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_background_flushes: -1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Compression algorithms supported: 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kZSTD supported: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kXpressCompression supported: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kBZip2Compression supported: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kLZ4Compression supported: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kZlibCompression supported: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: kSnappyCompression supported: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.merge_operator: 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_filter: None 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 
ceph-mon[94619]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x559d152125a0) 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: cache_index_and_filter_blocks: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: pin_top_level_index_and_filter: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: index_type: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: data_block_index_type: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: index_shortening: 1 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: checksum: 4 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: no_block_cache: 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache: 0x559d15237350 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache_name: BinnedLRUCache 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache_options: 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: capacity : 536870912 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: num_shard_bits : 4 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: strict_capacity_limit : 0 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: high_pri_pool_ratio: 0.000 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_cache_compressed: (nil) 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: persistent_cache: (nil) 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_size: 4096 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_size_deviation: 10 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: block_restart_interval: 16 2026-03-09T00:16:37.604 INFO:journalctl@ceph.mon.a.vm04.stdout: index_block_restart_interval: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: metadata_block_size: 4096 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: partition_filters: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: use_delta_encoding: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: filter_policy: bloomfilter 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: whole_key_filtering: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: verify_compression: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: read_amp_bytes_per_bit: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: format_version: 5 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: enable_index_compression: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: block_align: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: max_auto_readahead_size: 262144 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: prepopulate_block_cache: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: initial_auto_readahead_size: 8192 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 
vm04 ceph-mon[94619]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression: NoCompression 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.num_levels: 7 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T00:16:37.605 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: 
Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T00:16:37.605 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.inplace_update_support: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.bloom_locality: 0 2026-03-09T00:16:37.606 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.max_successive_merges: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.ttl: 2592000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.enable_blob_files: false 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.min_blob_size: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 48.sst 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 50, last_sequence is 22045, log_number is 46,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 46 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 4b47172a-14e0-45d7-8049-a975fd5f3a1c 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015397254243, "job": 1, "event": "recovery_started", "wal_files": [46]} 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #46 mode 2 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015397258348, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 51, "file_size": 771656, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 22056, "largest_seqno": 22864, "table_properties": {"data_size": 767852, "index_size": 1809, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 965, "raw_key_size": 10063, "raw_average_key_size": 26, "raw_value_size": 759853, "raw_average_value_size": 1994, "num_data_blocks": 82, "num_entries": 381, "num_filter_entries": 381, "num_deletions": 8, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773015397, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "4b47172a-14e0-45d7-8049-a975fd5f3a1c", "db_session_id": "3IIG1DZDRR4CG5G2RCS2", "orig_file_number": 51, "seqno_to_time_mapping": "N/A"}} 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015397258413, "job": 1, "event": "recovery_finished"} 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/version_set.cc:5047] Creating manifest 53 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
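(The recovery lines above include RocksDB's EVENT_LOG_v1 records, which are plain JSON after the marker: "recovery_started", "table_file_creation", "recovery_finished". A minimal sketch for extracting them from a saved log, assuming only that format; the path is a placeholder.)

import json

# Minimal sketch: RocksDB writes machine-readable records as JSON after the
# "EVENT_LOG_v1" marker. Walk the log text and decode each record with
# raw_decode so nested objects (e.g. table_properties) are handled.
MARKER = "rocksdb: EVENT_LOG_v1 "

def rocksdb_events(log_text):
    decoder = json.JSONDecoder()
    start = 0
    while True:
        idx = log_text.find(MARKER, start)
        if idx < 0:
            return
        start = idx + len(MARKER)
        try:
            obj, _ = decoder.raw_decode(log_text, start)
        except ValueError:
            continue          # truncated record; skip it
        yield obj

if __name__ == "__main__":
    with open("teuthology.log") as f:          # placeholder path
        for ev in rocksdb_events(f.read()):
            if ev.get("event") == "table_file_creation":
                print(ev["file_number"], ev["file_size"])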
2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000046.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x559d15238e00 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: DB pointer 0x559d1534e000 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: ** DB Stats ** 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: ** Compaction Stats [default] ** 2026-03-09T00:16:37.606 INFO:journalctl@ceph.mon.a.vm04.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: L0 1/0 753.57 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 235.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: L6 1/0 9.52 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Sum 2/0 10.26 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 235.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 235.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: ** Compaction Stats [default] ** 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 235.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Cumulative compaction: 0.00 GB write, 107.46 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Interval compaction: 0.00 GB write, 107.46 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Block cache BinnedLRUCache@0x559d15237350#2 capacity: 512.00 MB usage: 3.11 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1e-05 secs_since: 0 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,1.03 KB,0.000196695%) IndexBlock(1,2.08 KB,0.000396371%) Misc(1,0.00 KB,0%) 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: starting mon.a rank 0 at public addrs [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] at bind addrs [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???) 
e3 preinit fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).mds e1 new map 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).mds e1 print_map 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: e1 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: legacy client fscid: -1 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout: No filesystems configured 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).osd e96 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).osd e96 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:37.607 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:37 vm04 ceph-mon[94619]: mon.a@-1(???).paxosservice(auth 1..24) refresh upgraded, format 0 -> 3 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: mon.a calling monitor election 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 
ceph-mon[82076]: monmap epoch 3 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: last_changed 2026-03-08T23:54:41.353218+0000 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: created 2026-03-08T23:53:57.979597+0000 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: min_mon_release 17 (quincy) 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: election_strategy: 1 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.a 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: 1: [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon.c 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: 2: [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon.b 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: fsmap 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: mgrmap e41: y(active, since 78s), standbys: x 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: overall HEALTH_OK 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: from='mgr.25000 ' entity='' 2026-03-09T00:16:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mon[82076]: mgrmap e42: y(active, since 78s), standbys: x 2026-03-09T00:16:39.079 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: ignoring --setuser ceph since I am not root 2026-03-09T00:16:39.079 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: ignoring --setgroup ceph since I am not root 2026-03-09T00:16:39.079 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:38 vm10 ceph-mgr[79184]: -- 192.168.123.110:0/803236772 <== mon.2 v2:192.168.123.110:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x559a544a14a0 con 0x559a5447f400 2026-03-09T00:16:39.079 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:38.834+0000 7f5bb9052140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:16:39.079 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:38 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:38.875+0000 7f5bb9052140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ignoring --setuser ceph since I am not root 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ignoring --setgroup ceph since I am not root 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:38 
vm04 ceph-mgr[81427]: -- 192.168.123.104:0/3172290045 <== mon.2 v2:192.168.123.110:3300/0 3 ==== mon_map magic: 0 ==== 413+0+0 (secure 0 0 0) 0x55e3b0e0e4e0 con 0x55e3b0dcb400 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mgr[81427]: -- 192.168.123.104:0/3172290045 <== mon.2 v2:192.168.123.110:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55e3b0ded4a0 con 0x55e3b0dcb400 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:38.846+0000 7fe18eddc140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:38.890+0000 7fe18eddc140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:39.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: mon.a calling monitor election 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: monmap epoch 3 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: last_changed 2026-03-08T23:54:41.353218+0000 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: created 2026-03-08T23:53:57.979597+0000 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: min_mon_release 17 (quincy) 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: election_strategy: 1 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.a 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: 1: [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon.c 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: 2: 
[v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon.b 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: fsmap 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: mgrmap e41: y(active, since 78s), standbys: x 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: overall HEALTH_OK 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: from='mgr.25000 ' entity='' 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[51053]: mgrmap e42: y(active, since 78s), standbys: x 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: from='mgr.25000 192.168.123.104:0/2868374144' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: pgmap v41: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: mon.a calling monitor election 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: monmap epoch 3 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: last_changed 2026-03-08T23:54:41.353218+0000 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: created 2026-03-08T23:53:57.979597+0000 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: min_mon_release 17 (quincy) 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: election_strategy: 1 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.a 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: 1: [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon.c 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: 2: [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon.b 2026-03-09T00:16:39.101 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: fsmap 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: mgrmap e41: y(active, since 78s), standbys: x 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: overall HEALTH_OK 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: from='mgr.25000 ' entity='' 2026-03-09T00:16:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:38 vm04 ceph-mon[94619]: mgrmap e42: y(active, since 78s), standbys: x 2026-03-09T00:16:39.569 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:39.260+0000 7f5bb9052140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:16:39.599 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:39.282+0000 7fe18eddc140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T00:16:39.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:39.567+0000 7f5bb9052140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:16:39.829 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:16:39.829 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
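(From here on, both managers log a "Module <name> has missing NOTIFY_TYPES member" line for most of their modules as they come up on the redeployed image. To see at a glance which modules warn on which daemon, one could tally the lines from the saved log; a minimal sketch assuming only the journalctl@ceph.mgr.* line format shown above.)

import re
from collections import defaultdict

# Minimal sketch: count "Module <name> has missing NOTIFY_TYPES member"
# warnings per mgr daemon, using the journalctl channel name
# (e.g. "journalctl@ceph.mgr.x.vm10") to tell the daemons apart.
LINE_RE = re.compile(
    r"journalctl@(ceph\.mgr\.\S+?)\.stdout:.*?"
    r"Module (\S+) has missing NOTIFY_TYPES member"
)

def notify_warnings(log_text):
    warned = defaultdict(set)
    for daemon, module in LINE_RE.findall(log_text):
        warned[daemon].add(module)
    return warned

if __name__ == "__main__":
    with open("teuthology.log") as f:          # placeholder path
        for daemon, modules in sorted(notify_warnings(f.read()).items()):
            print(daemon, "->", ", ".join(sorted(modules)))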
2026-03-09T00:16:39.829 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: from numpy import show_config as show_numpy_config 2026-03-09T00:16:39.829 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:39.652+0000 7f5bb9052140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:16:39.829 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:39.687+0000 7f5bb9052140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:16:39.829 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:39 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:39.762+0000 7f5bb9052140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:16:39.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:39.597+0000 7fe18eddc140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T00:16:39.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T00:16:39.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
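(The warning itself means the loaded module class does not declare which cluster-map notifications it wants. For context only, a rough sketch of the shape such a declaration takes in a mgr module; this assumes the MgrModule/NotifyType interface exported by mgr_module in recent Ceph releases, and the enum members used here are illustrative, not a statement about any particular module in this run.)

# Rough, illustrative sketch only (not taken from this run): a mgr module
# declaring which notifications it wants, which is what the loader checks for
# when it logs "has missing NOTIFY_TYPES member". Assumes the MgrModule /
# NotifyType interface from mgr_module; the members listed are examples.
from mgr_module import MgrModule, NotifyType

class Module(MgrModule):
    NOTIFY_TYPES = [NotifyType.mon_map, NotifyType.osd_map]

    def notify(self, notify_type, notify_id):
        # Called by the mgr for the notification types declared above.
        self.log.debug("got %s notification", notify_type)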
2026-03-09T00:16:39.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: from numpy import show_config as show_numpy_config 2026-03-09T00:16:39.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:39.680+0000 7fe18eddc140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T00:16:39.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:39.727+0000 7fe18eddc140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T00:16:39.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:39.794+0000 7fe18eddc140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T00:16:40.507 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.239+0000 7f5bb9052140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:16:40.507 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.354+0000 7f5bb9052140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:16:40.507 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.394+0000 7f5bb9052140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:16:40.508 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.428+0000 7f5bb9052140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:16:40.508 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.469+0000 7f5bb9052140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:16:40.540 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.279+0000 7fe18eddc140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T00:16:40.540 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.390+0000 7fe18eddc140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:16:40.540 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.429+0000 7fe18eddc140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T00:16:40.540 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.463+0000 7fe18eddc140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T00:16:40.540 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.503+0000 7fe18eddc140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T00:16:40.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:40 vm10 ceph-mon[82076]: mgrmap e43: y(active, since 79s), standbys: x 2026-03-09T00:16:40.828 
INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.506+0000 7f5bb9052140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:16:40.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.669+0000 7f5bb9052140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:16:40.828 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.722+0000 7f5bb9052140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:16:40.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.538+0000 7fe18eddc140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T00:16:40.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.709+0000 7fe18eddc140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T00:16:40.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.760+0000 7fe18eddc140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T00:16:40.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:40 vm04 ceph-mon[51053]: mgrmap e43: y(active, since 79s), standbys: x 2026-03-09T00:16:40.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:40 vm04 ceph-mon[94619]: mgrmap e43: y(active, since 79s), standbys: x 2026-03-09T00:16:41.214 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:40 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:40.937+0000 7f5bb9052140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:16:41.269 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:40.991+0000 7fe18eddc140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T00:16:41.482 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.213+0000 7f5bb9052140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:16:41.482 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.248+0000 7f5bb9052140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:16:41.482 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.289+0000 7f5bb9052140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:16:41.482 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.364+0000 7f5bb9052140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:16:41.482 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.405+0000 7f5bb9052140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:16:41.537 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 
2026-03-09T00:16:41.267+0000 7fe18eddc140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T00:16:41.537 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.304+0000 7fe18eddc140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T00:16:41.537 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.344+0000 7fe18eddc140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T00:16:41.537 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.423+0000 7fe18eddc140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T00:16:41.537 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.459+0000 7fe18eddc140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T00:16:41.756 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.481+0000 7f5bb9052140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:16:41.756 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.590+0000 7f5bb9052140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:16:41.756 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.720+0000 7f5bb9052140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:16:41.827 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.535+0000 7fe18eddc140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T00:16:41.827 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.648+0000 7fe18eddc140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T00:16:41.827 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.784+0000 7fe18eddc140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:41 vm10 ceph-mon[82076]: Standby manager daemon x restarted 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:41 vm10 ceph-mon[82076]: Standby manager daemon x started 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:41 vm10 ceph-mon[82076]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:41 vm10 ceph-mon[82076]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:41 vm10 ceph-mon[82076]: from='mgr.? 
192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:41 vm10 ceph-mon[82076]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:41.754+0000 7f5bb9052140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: [09/Mar/2026:00:16:41] ENGINE Bus STARTING 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: CherryPy Checker: 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: The Application mounted at '' has an empty config. 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: [09/Mar/2026:00:16:41] ENGINE Serving on http://:::9283 2026-03-09T00:16:42.078 INFO:journalctl@ceph.mgr.x.vm10.stdout:Mar 09 00:16:41 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-x[79180]: [09/Mar/2026:00:16:41] ENGINE Bus STARTED 2026-03-09T00:16:42.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:41.832+0000 7fe18eddc140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T00:16:42.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:41] ENGINE Bus STARTING 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: CherryPy Checker: 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: The Application mounted at '' has an empty config. 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:42] ENGINE Serving on http://:::9283 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:16:42] ENGINE Bus STARTED 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[51053]: Standby manager daemon x restarted 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[51053]: Standby manager daemon x started 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[51053]: from='mgr.? 
192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[51053]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[94619]: Standby manager daemon x restarted 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[94619]: Standby manager daemon x started 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[94619]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[94619]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[94619]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T00:16:42.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:41 vm04 ceph-mon[94619]: from='mgr.? 192.168.123.110:0/847714446' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: mgrmap e44: y(active, since 81s), standbys: x 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: Active manager daemon y restarted 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: Activating manager daemon y 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: mgrmap e45: y(active, starting, since 0.0185379s), standbys: x 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr metadata", 
"who": "x", "id": "x"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: Manager daemon y is now available 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:16:42.881 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:16:43.000 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:42 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:42.867+0000 7fe15b141640 -1 mgr.server handle_report got status from non-daemon mon.a 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: mgrmap e44: y(active, since 81s), standbys: x 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: Active manager daemon y restarted 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: Activating manager daemon y 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: mgrmap e45: y(active, starting, since 0.0185379s), standbys: x 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:16:43.001 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: Manager daemon y is now available 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: mgrmap e44: y(active, since 81s), standbys: x 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: Active manager daemon y restarted 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: Activating manager daemon y 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: mgrmap e45: y(active, starting, since 0.0185379s), standbys: x 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: Manager daemon y is now available 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T00:16:43.002 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:42 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: 
dispatch 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: mgrmap e46: y(active, since 1.02098s), standbys: x 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: [09/Mar/2026:00:16:43] ENGINE Bus STARTING 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: [09/Mar/2026:00:16:43] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: [09/Mar/2026:00:16:43] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: [09/Mar/2026:00:16:43] ENGINE Bus STARTED 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: [09/Mar/2026:00:16:43] ENGINE Client ('192.168.123.104', 53392) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: mgrmap e46: y(active, since 1.02098s), standbys: x 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: [09/Mar/2026:00:16:43] ENGINE Bus STARTING 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: [09/Mar/2026:00:16:43] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: [09/Mar/2026:00:16:43] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: [09/Mar/2026:00:16:43] ENGINE Bus STARTED 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: [09/Mar/2026:00:16:43] ENGINE Client ('192.168.123.104', 53392) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:43.853 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:43 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: 
mgrmap e46: y(active, since 1.02098s), standbys: x 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: [09/Mar/2026:00:16:43] ENGINE Bus STARTING 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: [09/Mar/2026:00:16:43] ENGINE Serving on http://192.168.123.104:8765 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: [09/Mar/2026:00:16:43] ENGINE Serving on https://192.168.123.104:7150 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: [09/Mar/2026:00:16:43] ENGINE Bus STARTED 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: [09/Mar/2026:00:16:43] ENGINE Client ('192.168.123.104', 53392) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:44.114 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: mgrmap e47: y(active, since 2s), standbys: x 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:45.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:45.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:45.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:16:45.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:44 vm10 ceph-mon[82076]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:16:45.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:16:45] "GET /metrics HTTP/1.1" 200 34772 "" "Prometheus/2.51.0" 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: mgrmap e47: y(active, since 2s), standbys: x 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[51053]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 
103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm04", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: mgrmap e47: y(active, since 2s), standbys: x 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm10", "name": "osd_memory_target"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: Updating vm04:/etc/ceph/ceph.conf 2026-03-09T00:16:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:44 vm04 ceph-mon[94619]: Updating vm10:/etc/ceph/ceph.conf 2026-03-09T00:16:46.178 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 systemd[1]: Stopping Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c[51049]: 2026-03-09T00:16:46.176+0000 7f49106b1700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c[51049]: 2026-03-09T00:16:46.176+0000 7f49106b1700 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 podman[96308]: 2026-03-09 00:16:46.293205629 +0000 UTC m=+0.128904802 container died 5c2d9165643cad1ecd1971a3d02dce7cd0119af2d8d3355b4b622e245e11dbb7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, release=754, CEPH_POINT_RELEASE=-17.2.0, ceph=True, io.openshift.expose-services=, com.redhat.component=centos-stream-container, io.openshift.tags=base centos centos-stream, name=centos-stream, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, GIT_BRANCH=HEAD, distribution-scope=public, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, RELEASE=HEAD, io.buildah.version=1.19.8, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8) 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 podman[96308]: 2026-03-09 00:16:46.30955581 +0000 UTC m=+0.145254983 container remove 5c2d9165643cad1ecd1971a3d02dce7cd0119af2d8d3355b4b622e245e11dbb7 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, GIT_CLEAN=True, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, io.buildah.version=1.19.8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, CEPH_POINT_RELEASE=-17.2.0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, io.openshift.expose-services=, release=754, vcs-type=git, architecture=x86_64, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, RELEASE=HEAD, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, vendor=Red Hat, Inc.) 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 bash[96308]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c.service: Deactivated successfully. 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 systemd[1]: Stopped Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:16:46.430 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c.service: Consumed 10.172s CPU time. 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 systemd[1]: Starting Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 podman[96423]: 2026-03-09 00:16:46.637566615 +0000 UTC m=+0.016456702 container create d5119f6d234550c7eab0f26860cff5b292fa76d9ebbf497c4adf645e16ab99ec (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid) 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 podman[96423]: 2026-03-09 00:16:46.672082355 +0000 UTC m=+0.050972442 container init d5119f6d234550c7eab0f26860cff5b292fa76d9ebbf497c4adf645e16ab99ec (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 podman[96423]: 2026-03-09 00:16:46.674996699 +0000 UTC m=+0.053886786 container start d5119f6d234550c7eab0f26860cff5b292fa76d9ebbf497c4adf645e16ab99ec (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2) 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 bash[96423]: d5119f6d234550c7eab0f26860cff5b292fa76d9ebbf497c4adf645e16ab99ec 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 podman[96423]: 2026-03-09 00:16:46.630714719 +0000 UTC m=+0.009604806 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 
quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 systemd[1]: Started Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: pidfile_write: ignore empty --pid-file 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: load: jerasure load: lrc 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: RocksDB version: 7.9.2 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Git sha 0 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: DB SUMMARY 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: DB Session ID: 970WD6WE23VNOSNS1MAC 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: CURRENT file: CURRENT 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: MANIFEST file: MANIFEST-000009 size: 2266 Bytes 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 1, files: 000045.sst 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000043.log size: 1751765 ; 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.error_if_exists: 0 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.create_if_missing: 0 2026-03-09T00:16:46.705 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.paranoid_checks: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.env: 0x55751368ddc0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T00:16:46.706 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.info_log: 0x557514d6a5c0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.statistics: (nil) 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.use_fsync: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_log_file_size: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.allow_fallocate: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.use_direct_reads: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.db_log_dir: 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.wal_dir: 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.db_write_buffer_size: 0 
2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.write_buffer_manager: 0x557514d6f900 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.unordered_write: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.row_cache: None 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.wal_filter: None 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.two_write_queues: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.wal_compression: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.atomic_flush: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T00:16:46.706 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 
vm04 ceph-mon[96438]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.log_readahead_size: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_background_jobs: 2 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_background_compactions: -1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_subcompactions: 1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_open_files: -1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: 
Options.compaction_readahead_size: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_background_flushes: -1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Compression algorithms supported: 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kZSTD supported: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kXpressCompression supported: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kBZip2Compression supported: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kLZ4Compression supported: 1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kZlibCompression supported: 1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: kSnappyCompression supported: 1 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 2026-03-09T00:16:46.707 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.merge_operator: 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_filter: None 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x557514d6a5a0) 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: cache_index_and_filter_blocks: 1 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: 
cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: pin_top_level_index_and_filter: 1 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: index_type: 0 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: data_block_index_type: 0 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: index_shortening: 1 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T00:16:46.836 INFO:journalctl@ceph.mon.c.vm04.stdout: checksum: 4 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: no_block_cache: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache: 0x557514d8f350 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache_name: BinnedLRUCache 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache_options: 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: capacity : 536870912 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: num_shard_bits : 4 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: strict_capacity_limit : 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: high_pri_pool_ratio: 0.000 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_cache_compressed: (nil) 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: persistent_cache: (nil) 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_size: 4096 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_size_deviation: 10 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_restart_interval: 16 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: index_block_restart_interval: 1 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: metadata_block_size: 4096 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: partition_filters: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: use_delta_encoding: 1 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: filter_policy: bloomfilter 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: whole_key_filtering: 1 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: verify_compression: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: read_amp_bytes_per_bit: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: format_version: 5 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: enable_index_compression: 1 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: block_align: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: max_auto_readahead_size: 262144 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: prepopulate_block_cache: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: initial_auto_readahead_size: 8192 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 
00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression: NoCompression 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.num_levels: 7 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T00:16:46.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 
2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: 
Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T00:16:46.838 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.inplace_update_support: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.bloom_locality: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.max_successive_merges: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T00:16:46.839 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.ttl: 2592000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.enable_blob_files: false 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.min_blob_size: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 45.sst 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 47, last_sequence is 25210, log_number is 43,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 43 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 684527de-bebc-40bd-96e9-188ceaba37a2 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015406709337, "job": 1, "event": "recovery_started", "wal_files": [43]} 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #43 mode 2 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015406715295, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 48, "file_size": 1084525, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 25215, "largest_seqno": 25450, "table_properties": {"data_size": 1082065, "index_size": 1107, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 325, "raw_key_size": 2925, "raw_average_key_size": 24, "raw_value_size": 1078970, "raw_average_value_size": 9066, "num_data_blocks": 50, "num_entries": 119, "num_filter_entries": 119, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773015406, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "684527de-bebc-40bd-96e9-188ceaba37a2", "db_session_id": "970WD6WE23VNOSNS1MAC", "orig_file_number": 48, "seqno_to_time_mapping": "N/A"}} 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773015406715423, "job": 1, "event": "recovery_finished"} 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/version_set.cc:5047] Creating manifest 50 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000043.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x557514d90e00 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: DB pointer 0x557514ea6000 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout: ** DB Stats ** 2026-03-09T00:16:46.839 INFO:journalctl@ceph.mon.c.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: ** Compaction Stats [default] ** 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: L0 1/0 1.03 MB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 278.9 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: L6 1/0 11.08 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Sum 2/0 12.12 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 278.9 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 278.9 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: ** Compaction Stats [default] ** 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 278.9 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Cumulative compaction: 0.00 GB write, 49.98 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Interval compaction: 0.00 GB write, 49.98 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Block cache BinnedLRUCache@0x557514d8f350#2 capacity: 512.00 MB usage: 1.58 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 8e-06 secs_since: 0 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.38 KB,7.15256e-05%) IndexBlock(1,1.20 KB,0.000229478%) Misc(1,0.00 KB,0%) 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: starting mon.c rank 1 at public addrs [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] at bind addrs [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???) 
e3 preinit fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).mds e1 new map 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).mds e1 print_map 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: e1 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: legacy client fscid: -1 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout: No filesystems configured 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).osd e97 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).osd e97 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).osd e97 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).osd e97 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T00:16:46.840 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: mon.c@-1(???).paxosservice(auth 1..25) refresh upgraded, format 0 -> 3 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 
00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Upgrade: It appears safe to stop mon.c 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Upgrade: Updating mon.c 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[94619]: Deploying daemon mon.c on vm04 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Updating 
vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Upgrade: It appears safe to stop mon.c 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Upgrade: Updating mon.c 2026-03-09T00:16:47.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:47.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:47.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:47.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:46 vm04 ceph-mon[96438]: Deploying daemon mon.c on vm04 2026-03-09T00:16:47.103 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:47.001Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:47.103 
INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:47.002Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.conf 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Updating vm10:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Updating vm04:/etc/ceph/ceph.client.admin.keyring 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Updating vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Updating vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/config/ceph.client.admin.keyring 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T00:16:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Upgrade: It appears safe to stop mon.c 2026-03-09T00:16:47.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Upgrade: Updating mon.c 2026-03-09T00:16:47.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:47.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:47.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:47.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:46 vm10 ceph-mon[82076]: Deploying daemon mon.c on vm04 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: mon.c calling monitor election 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: mon.b calling monitor election 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: mon.a calling monitor election 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: monmap epoch 4 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: last_changed 2026-03-09T00:16:46.836593+0000 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: created 2026-03-08T23:53:57.979597+0000 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: min_mon_release 19 (squid) 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: election_strategy: 1 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.a 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: 1: [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon.c 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: 2: [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon.b 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: fsmap 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: mgrmap e47: y(active, since 5s), standbys: x 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: overall HEALTH_OK 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.974 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: mon.c calling monitor election 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: mon.b calling monitor election 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: mon.a calling monitor election 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: monmap epoch 4 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: last_changed 2026-03-09T00:16:46.836593+0000 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: created 2026-03-08T23:53:57.979597+0000 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: min_mon_release 19 (squid) 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: election_strategy: 1 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.a 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: 1: [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon.c 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: 2: [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon.b 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: fsmap 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: mgrmap e47: y(active, since 5s), standbys: x 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: overall HEALTH_OK 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:47.975 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:47 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: mon.c calling monitor election 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: mon.b calling monitor election 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: mon.a calling monitor election 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: monmap epoch 4 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: last_changed 2026-03-09T00:16:46.836593+0000 2026-03-09T00:16:48.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: created 2026-03-08T23:53:57.979597+0000 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: min_mon_release 19 (squid) 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: election_strategy: 1 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: 0: [v2:192.168.123.104:3300/0,v1:192.168.123.104:6789/0] mon.a 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: 1: [v2:192.168.123.104:3301/0,v1:192.168.123.104:6790/0] mon.c 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: 2: [v2:192.168.123.110:3300/0,v1:192.168.123.110:6789/0] mon.b 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: fsmap 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: mgrmap e47: y(active, since 5s), standbys: x 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: overall HEALTH_OK 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:48.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:47 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:49.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[94619]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:16:49.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[94619]: mgrmap e48: y(active, since 6s), standbys: x 2026-03-09T00:16:49.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[96438]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:16:49.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[96438]: mgrmap e48: y(active, since 6s), standbys: x 2026-03-09T00:16:49.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:48 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:48 vm10 ceph-mon[82076]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:16:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:48 vm10 ceph-mon[82076]: mgrmap e48: y(active, since 6s), standbys: x 2026-03-09T00:16:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:48 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:48 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.328 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:48 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:48 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: Detected new or changed devices on vm04 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: Reconfiguring mon.a (monmap changed)... 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: Reconfiguring daemon mon.a on vm04 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: Reconfiguring mgr.y (monmap changed)... 
2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: Reconfiguring daemon mgr.y on vm04 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.436 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: Detected new or changed devices on vm04 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: Reconfiguring mon.a (monmap changed)... 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: Reconfiguring daemon mon.a on vm04 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: Reconfiguring mgr.y (monmap changed)... 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: Reconfiguring daemon mgr.y on vm04 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:16:50.437 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: Detected new or changed devices on vm04 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: Reconfiguring mon.a (monmap changed)... 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: Reconfiguring daemon mon.a on vm04 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: Reconfiguring mgr.y (monmap changed)... 
2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: Reconfiguring daemon mgr.y on vm04 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:50.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:50.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:50.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:50.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:16:50.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: Reconfiguring mon.c (monmap changed)... 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: Reconfiguring daemon mon.c on vm04 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: Reconfiguring osd.0 (monmap changed)... 
2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: Reconfiguring daemon osd.0 on vm04 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: Reconfiguring osd.1 (monmap changed)... 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: Reconfiguring daemon osd.1 on vm04 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.550 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: Reconfiguring mon.c (monmap changed)... 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: Reconfiguring daemon mon.c on vm04 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: Reconfiguring daemon osd.0 on vm04 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: Reconfiguring osd.1 (monmap changed)... 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: Reconfiguring daemon osd.1 on vm04 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:16:51.551 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:51 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: Reconfiguring mon.c (monmap changed)... 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: Reconfiguring daemon mon.c on vm04 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: Reconfiguring daemon osd.0 on vm04 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: Reconfiguring osd.1 (monmap changed)... 
2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: Reconfiguring daemon osd.1 on vm04 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:16:51.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:51 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:51.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:51 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:16:51.724+0000 7fe15b141640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring osd.2 (monmap changed)... 
2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring daemon osd.2 on vm04 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring daemon osd.3 on vm04 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring rgw.foo.vm04.ehrfsf (monmap changed)... 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring mon.b (monmap changed)... 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: Reconfiguring daemon mon.b on vm10 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": 
"auth get", "entity": "osd.4"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T00:16:52.661 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:52 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring osd.2 (monmap changed)... 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring daemon osd.2 on vm04 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring daemon osd.3 on vm04 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring rgw.foo.vm04.ehrfsf (monmap changed)... 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring mon.b (monmap changed)... 
2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: Reconfiguring daemon mon.b on vm10 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring osd.2 (monmap changed)... 
2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring daemon osd.2 on vm04 2026-03-09T00:16:53.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring daemon osd.3 on vm04 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring rgw.foo.vm04.ehrfsf (monmap changed)... 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring mon.b (monmap changed)... 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: Reconfiguring daemon mon.b on vm10 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": 
"auth get", "entity": "osd.4"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T00:16:53.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:52 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: Reconfiguring mgr.x (monmap changed)... 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: Reconfiguring daemon mgr.x on vm10 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: Reconfiguring daemon osd.4 on vm10 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: Reconfiguring osd.5 (monmap changed)... 
2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: Reconfiguring daemon osd.5 on vm10 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:16:53.830 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:53.830 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: Reconfiguring mgr.x (monmap changed)... 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: Reconfiguring daemon mgr.x on vm10 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: Reconfiguring daemon osd.4 on vm10 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: Reconfiguring osd.5 (monmap changed)... 
2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: Reconfiguring daemon osd.5 on vm10 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.101 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 
ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:16:54.102 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: Reconfiguring mgr.x (monmap changed)... 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: Reconfiguring daemon mgr.x on vm10 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: Reconfiguring daemon osd.4 on vm10 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: Reconfiguring osd.5 (monmap changed)... 
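The journal entries above show mgr.y deleting the mgr/cephadm/upgrade_state config-key once the current staggered leg completes, then reconfiguring daemons after the monmap change. A minimal, hypothetical manual check (not part of the test; assumes jq is available on the host) to confirm that no saved upgrade state is left behind:

    # exits non-zero once cephadm has removed its saved upgrade state;
    # if the key is still present, pretty-print it
    ceph config-key exists mgr/cephadm/upgrade_state \
        && ceph config-key get mgr/cephadm/upgrade_state | jq .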
2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: Reconfiguring daemon osd.5 on vm10 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 
ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:16:54.104 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:54.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Reconfiguring daemon osd.6 on vm10 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Reconfiguring daemon osd.7 on vm10 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Reconfiguring rgw.foo.vm10.dwizvi (monmap changed)... 
2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Reconfiguring daemon rgw.foo.vm10.dwizvi on vm10 2026-03-09T00:16:55.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mon 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all crash 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mds 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all nfs 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all grafana 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all loki 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all promtail 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Finalizing container_image settings 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: Upgrade: Complete! 
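When a staggered leg finishes, the journal shows cephadm writing container_image settings for each daemon type ("Upgrade: Setting container_image for all ...") before logging "Upgrade: Complete!". A hedged, manual way to see which image overrides are currently in effect at a moment like this (not something the test runs itself):

    # list any container_image overrides present in the cluster configuration
    ceph config dump -f json | jq '[ .[] | select(.name == "container_image") ]'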
2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:55.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:54 vm10 ceph-mon[82076]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:16:55.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:16:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:16:55] "GET /metrics HTTP/1.1" 200 34772 "" "Prometheus/2.51.0" 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Reconfiguring daemon osd.6 on vm10 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Reconfiguring daemon osd.7 on vm10 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Reconfiguring rgw.foo.vm10.dwizvi (monmap changed)... 
2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Reconfiguring daemon rgw.foo.vm10.dwizvi on vm10 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mon 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all crash 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mds 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all nfs 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all grafana 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all loki 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all promtail 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Finalizing container_image settings 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: Upgrade: Complete! 
2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[96438]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Reconfiguring daemon osd.6 on vm10 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Reconfiguring daemon osd.7 on vm10 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Reconfiguring rgw.foo.vm10.dwizvi (monmap changed)... 
2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Reconfiguring daemon rgw.foo.vm10.dwizvi on vm10 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mon 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all crash 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mds 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all nfs 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all node-exporter 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all prometheus 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all alertmanager 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all grafana 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all loki 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all promtail 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Finalizing container_image settings 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: Upgrade: Complete! 
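After each leg the test re-asserts the daemon/version matrix with jq, using the same commands that appear in the task definition and later in this log. A condensed sketch of that pattern at this point in the run (the expected counts are specific to this leg: two mgrs and three mons already on the target build):

    sha1=e911bdebe5c8faa3800735d1568fcdca65db60df           # target build for this run
    ceph versions | jq -e '.mon | length == 1'               # every mon reports the same version...
    ceph versions | jq -e '.mon | keys' | grep "$sha1"       # ...and it is the target sha1
    ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 \
        | jq -e '.up_to_date | length == 5'                  # 2 mgrs + 3 mons up to date so far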
2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:55.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:54 vm04 ceph-mon[94619]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T00:16:57.016 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:56 vm10 ceph-mon[82076]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:16:57.016 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:56 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:57.016 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:56 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:57.002Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:16:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:16:57.003Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:16:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:56 vm04 ceph-mon[94619]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:16:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:56 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:57.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:56 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:57.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:56 vm04 ceph-mon[96438]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:16:57.351 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:56 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:16:57.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:56 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:16:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:16:58 vm10 ceph-mon[82076]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:16:59.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:16:58 vm04 ceph-mon[96438]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:16:59.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:16:58 vm04 ceph-mon[94619]: pgmap v11: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T00:17:01.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:00 vm10 ceph-mon[82076]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:17:01.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:00 vm04 ceph-mon[96438]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:17:01.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:00 vm04 ceph-mon[94619]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T00:17:03.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:02 vm10 ceph-mon[82076]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 0 B/s wr, 4 op/s 2026-03-09T00:17:03.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:02 vm04 ceph-mon[96438]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 0 B/s wr, 4 op/s 2026-03-09T00:17:03.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:02 vm04 ceph-mon[94619]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 103 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 0 B/s wr, 4 op/s 2026-03-09T00:17:04.229 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:03 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:04.252 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:03 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:04.252 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:03 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:05.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:04 vm10 ceph-mon[82076]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:17:05.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:17:05] "GET /metrics HTTP/1.1" 200 37586 "" "Prometheus/2.51.0" 2026-03-09T00:17:05.351 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:04 vm04 ceph-mon[94619]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:17:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:04 vm04 ceph-mon[96438]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:17:05.566 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:17:06.035 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:17:06.035 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (13m) 17s ago 20m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (86s) 22s ago 20m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (93s) 17s ago 20m 51.2M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (97s) 22s ago 22m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (10m) 17s ago 23m 539M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (28s) 17s ago 23m 44.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (65s) 22s ago 22m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (19s) 17s ago 22m 21.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (13m) 17s ago 20m 9987k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (13m) 22s ago 20m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (22m) 17s ago 22m 56.8M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (22m) 17s ago 22m 56.0M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (21m) 17s ago 21m 52.1M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (21m) 17s ago 21m 54.8M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (21m) 22s ago 21m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (21m) 22s ago 21m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (21m) 22s ago 21m 52.4M 4096M 17.2.0 e1d6a67b021e 
168db5828111 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (21m) 22s ago 21m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (99s) 22s ago 20m 51.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (20m) 17s ago 20m 99.4M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:17:06.036 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (20m) 22s ago 20m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:17:06.036 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:05 vm04 ceph-mon[96438]: from='client.54103 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:06.036 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:05 vm04 ceph-mon[94619]: from='client.54103 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:06.099 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | length == 1'"'"'' 2026-03-09T00:17:06.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:05 vm10 ceph-mon[82076]: from='client.54103 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:06.604 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:17:06.662 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.mon | keys'"'"' | grep $sha1' 2026-03-09T00:17:07.210 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T00:17:07.211 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:06 vm04 ceph-mon[94619]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:17:07.211 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:06 vm04 ceph-mon[94619]: from='client.54109 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:07.211 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:06 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/3638903759' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:07.211 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:06 vm04 ceph-mon[96438]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:17:07.211 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:06 vm04 ceph-mon[96438]: from='client.54109 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:07.211 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:06 vm04 ceph-mon[96438]: from='client.? 
192.168.123.104:0/3638903759' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:07.211 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:07.003Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:07.211 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:07.004Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:07.266 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 5'"'"'' 2026-03-09T00:17:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:06 vm10 ceph-mon[82076]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:17:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:06 vm10 ceph-mon[82076]: from='client.54109 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:06 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/3638903759' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:08.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:07 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/4161208999' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:08.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:07 vm10 ceph-mon[82076]: from='client.54124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:08.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:07 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/4161208999' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:08.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:07 vm04 ceph-mon[96438]: from='client.54124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:08.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:07 vm04 ceph-mon[94619]: from='client.? 
192.168.123.104:0/4161208999' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:08.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:07 vm04 ceph-mon[94619]: from='client.54124 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:09.266 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:09 vm04 ceph-mon[96438]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:17:09.266 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:09 vm04 ceph-mon[94619]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:17:09.307 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:17:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:09 vm10 ceph-mon[82076]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 72 KiB/s rd, 0 B/s wr, 119 op/s 2026-03-09T00:17:09.371 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:17:09.906 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:17:09.907 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:17:09.958 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:17:10.533 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:17:10.614 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2' 2026-03-09T00:17:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:11 vm04 ceph-mon[96438]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:17:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:11 vm04 ceph-mon[96438]: from='client.54130 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:11.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:11 vm04 ceph-mon[96438]: 
from='client.? 192.168.123.104:0/404862763' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:17:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:11 vm04 ceph-mon[94619]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:17:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:11 vm04 ceph-mon[94619]: from='client.54130 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:11.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:11 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/404862763' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:17:11.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:11 vm10 ceph-mon[82076]: pgmap v17: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 73 KiB/s rd, 0 B/s wr, 120 op/s 2026-03-09T00:17:11.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:11 vm10 ceph-mon[82076]: from='client.54130 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:11.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:11 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/404862763' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:17:12.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:12 vm10 ceph-mon[82076]: from='client.54142 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:12.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:12.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:12 vm04 ceph-mon[94619]: from='client.54142 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:12.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:12.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:12 vm04 ceph-mon[96438]: from='client.54142 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "osd", "limit": 2, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:12.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:12.701 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:12.777 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e 
sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:17:13.338 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (13m) 25s ago 20m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (94s) 30s ago 20m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (101s) 25s ago 20m 51.2M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (105s) 30s ago 22m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (11m) 25s ago 23m 539M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (36s) 25s ago 23m 44.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (73s) 30s ago 22m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (27s) 25s ago 22m 21.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (14m) 25s ago 20m 9987k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (13m) 30s ago 20m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (22m) 25s ago 22m 56.8M 4096M 17.2.0 e1d6a67b021e eb4d6ee04c91 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (22m) 25s ago 22m 56.0M 4096M 17.2.0 e1d6a67b021e f112f05700b8 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (21m) 25s ago 21m 52.1M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (21m) 25s ago 21m 54.8M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (21m) 30s ago 21m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (21m) 30s ago 21m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (21m) 30s ago 21m 52.4M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (21m) 30s ago 21m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (106s) 30s ago 20m 51.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:17:13.785 
INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (20m) 25s ago 20m 99.4M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:17:13.785 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (20m) 30s ago 20m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:17:14.055 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 10, 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:17:14.056 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 115 op/s 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 115 op/s 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:14.056 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:13 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 115 op/s 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:13 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) osd. 
Upgrade limited to 2 daemons (2 remaining).", 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:17:14.275 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:17:14.898 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:14.898 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='client.44163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='client.54154 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='client.54160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/643393873' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T00:17:14.899 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='client.44163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='client.54154 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='client.54160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/643393873' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:14 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='client.44163 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='client.54154 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='client.54160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/643393873' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:14.899 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T00:17:15.159 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:17:15] "GET /metrics HTTP/1.1" 200 37583 "" "Prometheus/2.51.0" 2026-03-09T00:17:15.468 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 systemd[1]: Stopping Ceph osd.0 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:17:15.730 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[54082]: 2026-03-09T00:17:15.466+0000 7f4601a37700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:17:15.730 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[54082]: 2026-03-09T00:17:15.466+0000 7f4601a37700 -1 osd.0 97 *** Got signal Terminated *** 2026-03-09T00:17:15.730 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[54082]: 2026-03-09T00:17:15.466+0000 7f4601a37700 -1 osd.0 97 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:17:15.981 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 podman[101579]: 2026-03-09 00:17:15.787784848 +0000 UTC m=+0.332435304 container died eb4d6ee04c9120a704214937313d62dac125a291ff0f8da18f5e45a388ea900a (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, release=754, GIT_BRANCH=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, architecture=x86_64, io.openshift.tags=base centos centos-stream, vcs-type=git, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, maintainer=Guillaume Abrioux , name=centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, distribution-scope=public) 2026-03-09T00:17:15.981 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 podman[101579]: 2026-03-09 00:17:15.812406004 +0000 UTC m=+0.357056460 container remove eb4d6ee04c9120a704214937313d62dac125a291ff0f8da18f5e45a388ea900a (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, ceph=True, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870, io.openshift.expose-services=, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.display-name=CentOS Stream 8, RELEASE=HEAD, com.redhat.component=centos-stream-container, maintainer=Guillaume Abrioux , GIT_CLEAN=True, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, version=8, architecture=x86_64, distribution-scope=public, vcs-type=git, io.buildah.version=1.19.8) 2026-03-09T00:17:15.981 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 bash[101579]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0 2026-03-09T00:17:15.981 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 podman[101643]: 2026-03-09 00:17:15.953407436 +0000 UTC m=+0.015503725 container create bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-09T00:17:15.981 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 116 op/s 
2026-03-09T00:17:15.981 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: from='client.44184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:15.981 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mgr 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mon 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all crash 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: osd.0 is safe to restart 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Upgrade: Updating osd.0 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: Deploying daemon osd.0 on vm04 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[96438]: osd.0 marked itself down and dead 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 116 op/s 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: from='client.44184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: Setting 
container_image for all mgr 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mon 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all crash 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: osd.0 is safe to restart 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Upgrade: Updating osd.0 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: Deploying daemon osd.0 on vm04 2026-03-09T00:17:15.982 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:15 vm04 ceph-mon[94619]: osd.0 marked itself down and dead 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: pgmap v19: 161 pgs: 161 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 0 B/s wr, 116 op/s 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: from='client.44184 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mgr 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mon 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all crash 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: osd.0 is safe to restart 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Upgrade: Updating osd.0 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: Deploying daemon osd.0 on vm04 2026-03-09T00:17:16.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:15 vm10 ceph-mon[82076]: osd.0 marked itself down and dead 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 podman[101643]: 2026-03-09 00:17:15.991501315 +0000 UTC m=+0.053597604 container init bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 podman[101643]: 2026-03-09 00:17:15.994187021 +0000 UTC m=+0.056283310 container start bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:15 vm04 podman[101643]: 2026-03-09 00:17:15.998206773 +0000 UTC m=+0.060303062 container attach bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8 
(image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101643]: 2026-03-09 00:17:15.946985087 +0000 UTC m=+0.009081376 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 conmon[101654]: conmon bfbf9270d073872681d8 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8.scope/container/memory.events 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101643]: 2026-03-09 00:17:16.106631922 +0000 UTC m=+0.168728211 container died bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101643]: 2026-03-09 00:17:16.123230384 +0000 UTC m=+0.185326673 container remove bfbf9270d073872681d856ddc869707fbbd82cc097e9fb66d8dcc35396b10ac8 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:17:16.307 
INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.0.service: Deactivated successfully. 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 systemd[1]: Stopped Ceph osd.0 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.0.service: Consumed 11.617s CPU time. 2026-03-09T00:17:16.307 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 systemd[1]: Starting Ceph osd.0 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101745]: 2026-03-09 00:17:16.390462896 +0000 UTC m=+0.016113737 container create e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101745]: 2026-03-09 00:17:16.431150409 +0000 UTC m=+0.056801270 container init e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101745]: 2026-03-09 00:17:16.434628016 +0000 UTC m=+0.060278867 container start e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, 
org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101745]: 2026-03-09 00:17:16.435607209 +0000 UTC m=+0.061258060 container attach e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 podman[101745]: 2026-03-09 00:17:16.382893911 +0000 UTC m=+0.008544772 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 bash[101745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:16.602 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 bash[101745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:16.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:16 vm04 ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:17:16.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:16 vm04 ceph-mon[96438]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T00:17:16.989 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:16 vm04 ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:17:16.989 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:16 vm04 ceph-mon[94619]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T00:17:16.989 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:17:16.989 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:17.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:16 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:17:17.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:16 vm10 
ceph-mon[82076]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 bash[101745]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:16 vm04 bash[101745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5b26e919-8888-45a7-b9bb-60fafb9d7e9e/osd-block-d3ce7c0b-7841-417d-8412-02f631c2946d --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T00:17:17.270 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5b26e919-8888-45a7-b9bb-60fafb9d7e9e/osd-block-d3ce7c0b-7841-417d-8412-02f631c2946d --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T00:17:17.270 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:17.004Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:17.270 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:17.005Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/ln -snf /dev/ceph-5b26e919-8888-45a7-b9bb-60fafb9d7e9e/osd-block-d3ce7c0b-7841-417d-8412-02f631c2946d /var/lib/ceph/osd/ceph-0/block 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/ln -snf /dev/ceph-5b26e919-8888-45a7-b9bb-60fafb9d7e9e/osd-block-d3ce7c0b-7841-417d-8412-02f631c2946d /var/lib/ceph/osd/ceph-0/block 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: 
/usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate[101756]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101745]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 conmon[101756]: conmon e681dc4624fc8078cf22 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104.scope/container/memory.events 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 podman[101745]: 2026-03-09 00:17:17.363125025 +0000 UTC m=+0.988775876 container died e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 podman[101745]: 2026-03-09 00:17:17.38493448 +0000 UTC m=+1.010585331 container remove e681dc4624fc8078cf220537a5e646001ef8178e774f251a046fa69157b6a104 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-activate, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 podman[101991]: 2026-03-09 00:17:17.477209814 +0000 UTC m=+0.016539533 container create a5eb77bcb38b57fff2a27f653aba4c12d6ba237a6d15c0a44fae46205a548bfb (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 podman[101991]: 2026-03-09 00:17:17.509579261 +0000 UTC m=+0.048908980 container init a5eb77bcb38b57fff2a27f653aba4c12d6ba237a6d15c0a44fae46205a548bfb (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 podman[101991]: 2026-03-09 00:17:17.513067018 +0000 UTC m=+0.052396737 container start a5eb77bcb38b57fff2a27f653aba4c12d6ba237a6d15c0a44fae46205a548bfb (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 bash[101991]: a5eb77bcb38b57fff2a27f653aba4c12d6ba237a6d15c0a44fae46205a548bfb 2026-03-09T00:17:17.529 
INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 podman[101991]: 2026-03-09 00:17:17.470885891 +0000 UTC m=+0.010215619 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:17:17.529 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:17 vm04 systemd[1]: Started Ceph osd.0 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[96438]: pgmap v21: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[96438]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[94619]: pgmap v21: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[94619]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:17 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:17 vm10 ceph-mon[82076]: pgmap v21: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:17 vm10 ceph-mon[82076]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T00:17:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:17 vm10 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:18.488 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:17:18.332+0000 7ffb1f70c740 -1 Falling back to public interface 2026-03-09T00:17:18.744 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:17:18.699+0000 7ffb1f70c740 -1 osd.0 0 read_superblock omap replica is missing. 2026-03-09T00:17:18.744 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:18 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:17:18.732+0000 7ffb1f70c740 -1 osd.0 97 log_to_monitors true 2026-03-09T00:17:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:18 vm10 ceph-mon[82076]: pgmap v23: 161 pgs: 7 active+undersized, 18 stale+active+clean, 6 active+undersized+degraded, 130 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 25/723 objects degraded (3.458%) 2026-03-09T00:17:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:18 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 25/723 objects degraded (3.458%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:18 vm10 ceph-mon[82076]: from='osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:17:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:18 vm10 ceph-mon[82076]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[94619]: pgmap v23: 161 pgs: 7 active+undersized, 18 stale+active+clean, 6 active+undersized+degraded, 130 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 25/723 objects degraded (3.458%) 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[94619]: Health check failed: Degraded data redundancy: 25/723 objects degraded (3.458%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[94619]: from='osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[94619]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[96438]: pgmap v23: 161 pgs: 7 active+undersized, 18 stale+active+clean, 6 active+undersized+degraded, 130 active+clean; 457 KiB data, 105 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 0 op/s; 25/723 objects degraded (3.458%) 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[96438]: Health check failed: Degraded data redundancy: 25/723 objects degraded (3.458%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[96438]: from='osd.0 
[v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:17:19.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:18 vm04 ceph-mon[96438]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: osdmap e100: 8 total, 7 up, 8 in 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[96438]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.269 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T00:17:20.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:20.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: osdmap e100: 8 total, 7 up, 8 in 2026-03-09T00:17:20.270 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:19 vm04 ceph-mon[94619]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 
2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: osdmap e100: 8 total, 7 up, 8 in 2026-03-09T00:17:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:19 vm10 ceph-mon[82076]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:20.850 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:20 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:17:20.530+0000 7fe15b141640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:17:20.850 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:17:20 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:17:20.710+0000 7ffb16cb6640 -1 osd.0 97 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: pgmap v25: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:21 vm10 ceph-mon[82076]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:17:21.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: pgmap v25: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:17:21.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 
2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[94619]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: pgmap v25: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:21 vm04 ceph-mon[96438]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:17:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:22 vm10 ceph-mon[82076]: OSD bench result of 30148.514901 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:17:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:22 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:17:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:22 vm10 ceph-mon[82076]: osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193] boot 2026-03-09T00:17:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:22 vm10 ceph-mon[82076]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T00:17:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:22 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[96438]: OSD bench result of 30148.514901 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[96438]: osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193] boot 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[96438]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[94619]: OSD bench result of 30148.514901 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[94619]: osd.0 [v2:192.168.123.104:6802/1982755193,v1:192.168.123.104:6803/1982755193] boot 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[94619]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T00:17:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:22 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T00:17:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:23 vm10 ceph-mon[82076]: pgmap v27: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:17:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:23 vm10 ceph-mon[82076]: osdmap e102: 8 total, 8 up, 8 in 2026-03-09T00:17:23.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:23 vm04 ceph-mon[96438]: pgmap v27: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:17:23.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:23 vm04 ceph-mon[96438]: osdmap e102: 8 total, 8 up, 8 in 2026-03-09T00:17:23.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:23 vm04 ceph-mon[94619]: pgmap v27: 161 pgs: 38 active+undersized, 20 active+undersized+degraded, 103 active+clean; 457 KiB data, 124 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:17:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:23 vm04 ceph-mon[94619]: osdmap e102: 8 total, 8 up, 8 in 2026-03-09T00:17:24.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:24 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:24.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:24 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:24.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:24 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:25.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:17:25] "GET /metrics HTTP/1.1" 200 37583 "" "Prometheus/2.51.0" 2026-03-09T00:17:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:25 vm10 ceph-mon[82076]: pgmap v29: 161 pgs: 14 active+undersized, 7 active+undersized+degraded, 140 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s; 32/723 objects degraded (4.426%) 2026-03-09T00:17:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:25 vm10 ceph-mon[82076]: Health check update: Degraded data redundancy: 32/723 objects degraded (4.426%), 7 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:25.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:25 vm04 ceph-mon[96438]: pgmap v29: 161 pgs: 14 active+undersized, 7 active+undersized+degraded, 140 active+clean; 457 KiB data, 
125 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s; 32/723 objects degraded (4.426%) 2026-03-09T00:17:25.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:25 vm04 ceph-mon[96438]: Health check update: Degraded data redundancy: 32/723 objects degraded (4.426%), 7 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:25.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:25 vm04 ceph-mon[94619]: pgmap v29: 161 pgs: 14 active+undersized, 7 active+undersized+degraded, 140 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s; 32/723 objects degraded (4.426%) 2026-03-09T00:17:25.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:25 vm04 ceph-mon[94619]: Health check update: Degraded data redundancy: 32/723 objects degraded (4.426%), 7 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:26.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:26 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 32/723 objects degraded (4.426%), 7 pgs degraded) 2026-03-09T00:17:26.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:26 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:17:26.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:26 vm04 ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 32/723 objects degraded (4.426%), 7 pgs degraded) 2026-03-09T00:17:26.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:26 vm04 ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:17:26.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:26 vm04 ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 32/723 objects degraded (4.426%), 7 pgs degraded) 2026-03-09T00:17:26.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:26 vm04 ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:17:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:27.005Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:27.006Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:27 vm10 ceph-mon[82076]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 836 B/s rd, 0 op/s 2026-03-09T00:17:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:27.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:27 vm04 ceph-mon[96438]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 836 B/s rd, 0 op/s 2026-03-09T00:17:27.851 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:27 vm04 ceph-mon[94619]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 836 B/s rd, 0 op/s 2026-03-09T00:17:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:29.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:29 vm10 ceph-mon[82076]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:29.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:29 vm04 ceph-mon[96438]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:29.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:29 vm04 ceph-mon[94619]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 125 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:31.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:31 vm10 ceph-mon[82076]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:17:31.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:31 vm04 ceph-mon[96438]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:17:31.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:31 vm04 ceph-mon[94619]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:17:33.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:32 vm10 ceph-mon[82076]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:33.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:32 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:33.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:32 vm04 ceph-mon[96438]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:33.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:32 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:32 vm04 ceph-mon[94619]: pgmap v33: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:32 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:34.404 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:34 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:34.404 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:34 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:17:34 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:35.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:17:35] "GET /metrics HTTP/1.1" 200 37608 "" "Prometheus/2.51.0" 2026-03-09T00:17:35.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:35 vm04 ceph-mon[96438]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:17:35.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:35 vm04 ceph-mon[94619]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:17:35.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:35 vm10 ceph-mon[82076]: pgmap v34: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:17:36.701 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:36 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:36.701 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:36 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:36.701 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:36 vm04 ceph-mon[94619]: Upgrade: osd.1 is safe to restart 2026-03-09T00:17:36.701 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:36 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:36.701 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:36 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:36.701 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:36 vm04 ceph-mon[96438]: Upgrade: osd.1 is safe to restart 2026-03-09T00:17:36.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:36 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:36.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:36 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T00:17:36.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:36 vm10 ceph-mon[82076]: Upgrade: osd.1 is safe to restart 2026-03-09T00:17:37.227 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:37.006Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:37.227 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:37.007Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:37.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[94619]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[94619]: Upgrade: Updating osd.1 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[94619]: Deploying daemon osd.1 on vm04 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[96438]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[96438]: Upgrade: Updating osd.1 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:37 vm04 ceph-mon[96438]: Deploying daemon osd.1 on vm04 2026-03-09T00:17:37.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:37 vm10 ceph-mon[82076]: pgmap v35: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 
0 op/s 2026-03-09T00:17:37.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:37 vm10 ceph-mon[82076]: Upgrade: Updating osd.1 2026-03-09T00:17:37.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:37 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:37.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:37 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T00:17:37.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:37 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:37.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:37 vm10 ceph-mon[82076]: Deploying daemon osd.1 on vm04 2026-03-09T00:17:38.277 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:38 vm04 systemd[1]: Stopping Ceph osd.1 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:17:38.582 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[56800]: 2026-03-09T00:17:38.275+0000 7f158f9fa700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:17:38.582 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[56800]: 2026-03-09T00:17:38.275+0000 7f158f9fa700 -1 osd.1 102 *** Got signal Terminated *** 2026-03-09T00:17:38.582 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:38 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[56800]: 2026-03-09T00:17:38.275+0000 7f158f9fa700 -1 osd.1 102 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:17:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:38 vm04 ceph-mon[96438]: osd.1 marked itself down and dead 2026-03-09T00:17:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:38 vm04 ceph-mon[94619]: osd.1 marked itself down and dead 2026-03-09T00:17:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:38 vm10 ceph-mon[82076]: osd.1 marked itself down and dead 2026-03-09T00:17:39.228 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:38 vm04 podman[105829]: 2026-03-09 00:17:38.920843316 +0000 UTC m=+0.696052410 container died f112f05700b8b3c9239d7f15235cf841e4c761f8492664e65f30def432f36a91 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, name=centos-stream, architecture=x86_64, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, io.openshift.tags=base centos centos-stream, version=8, release=754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.19.8, io.openshift.expose-services=, io.k8s.display-name=CentOS Stream 8, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., GIT_CLEAN=True, maintainer=Guillaume Abrioux , GIT_BRANCH=HEAD) 2026-03-09T00:17:39.479 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105829]: 2026-03-09 00:17:39.226745334 +0000 UTC m=+1.001954418 container remove f112f05700b8b3c9239d7f15235cf841e4c761f8492664e65f30def432f36a91 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, name=centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, release=754, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, vendor=Red Hat, Inc., com.redhat.component=centos-stream-container, io.openshift.expose-services=, maintainer=Guillaume Abrioux , distribution-scope=public, GIT_CLEAN=True) 2026-03-09T00:17:39.479 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 bash[105829]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1 2026-03-09T00:17:39.479 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105896]: 2026-03-09 00:17:39.401108088 +0000 UTC m=+0.030661081 container create 74c6863803fcda5a55859dfe0f6931b78908d27a94630d413c208573136d659e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 2026-03-09T00:17:39.479 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105896]: 2026-03-09 00:17:39.449422434 +0000 UTC m=+0.078975437 container init 74c6863803fcda5a55859dfe0f6931b78908d27a94630d413c208573136d659e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS) 2026-03-09T00:17:39.479 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105896]: 2026-03-09 00:17:39.455328708 +0000 UTC m=+0.084881690 container start 74c6863803fcda5a55859dfe0f6931b78908d27a94630d413c208573136d659e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.opencontainers.image.authors=Ceph Release Team , ceph=True, 
org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:17:39.794 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105896]: 2026-03-09 00:17:39.380714474 +0000 UTC m=+0.010267477 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:17:39.794 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105896]: 2026-03-09 00:17:39.491566633 +0000 UTC m=+0.121119626 container attach 74c6863803fcda5a55859dfe0f6931b78908d27a94630d413c208573136d659e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:17:39.794 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105914]: 2026-03-09 00:17:39.617250309 +0000 UTC m=+0.010880111 container died 74c6863803fcda5a55859dfe0f6931b78908d27a94630d413c208573136d659e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3) 2026-03-09T00:17:39.794 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:39 vm04 ceph-mon[96438]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:17:39.795 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:39 vm04 ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:17:39.795 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:39 vm04 ceph-mon[96438]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T00:17:39.795 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:39 vm04 
ceph-mon[94619]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:17:39.795 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:39 vm04 ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:17:39.795 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:39 vm04 ceph-mon[94619]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T00:17:40.051 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 podman[105914]: 2026-03-09 00:17:39.793907196 +0000 UTC m=+0.187536999 container remove 74c6863803fcda5a55859dfe0f6931b78908d27a94630d413c208573136d659e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223) 2026-03-09T00:17:40.051 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.1.service: Deactivated successfully. 2026-03-09T00:17:40.051 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 systemd[1]: Stopped Ceph osd.1 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:17:40.051 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:39 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.1.service: Consumed 7.601s CPU time. 2026-03-09T00:17:40.051 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 systemd[1]: Starting Ceph osd.1 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:17:40.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:39 vm10 ceph-mon[82076]: pgmap v36: 161 pgs: 161 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:17:40.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:39 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:17:40.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:39 vm10 ceph-mon[82076]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T00:17:40.305 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 podman[105998]: 2026-03-09 00:17:40.187585949 +0000 UTC m=+0.042197831 container create 1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_REF=squid) 2026-03-09T00:17:40.305 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 podman[105998]: 2026-03-09 00:17:40.155951108 +0000 UTC m=+0.010562989 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:17:40.601 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 podman[105998]: 2026-03-09 00:17:40.345511493 +0000 UTC m=+0.200123374 container init 1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:17:40.601 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 podman[105998]: 2026-03-09 00:17:40.351291551 +0000 UTC m=+0.205903432 container start 1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, 
org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True) 2026-03-09T00:17:40.601 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 podman[105998]: 2026-03-09 00:17:40.402150018 +0000 UTC m=+0.256761888 container attach 1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate, ceph=True, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:17:40.602 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:40.602 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:40.602 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:40.602 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T00:17:41.225 
INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8bbda548-d452-4ef7-a02f-dac5a013e067/osd-block-75d29058-61cd-44da-9ebc-7516b509075d --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-09T00:17:41.225 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:40 vm04 bash[105998]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8bbda548-d452-4ef7-a02f-dac5a013e067/osd-block-75d29058-61cd-44da-9ebc-7516b509075d --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-09T00:17:41.225 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:41 vm04 ceph-mon[96438]: pgmap v38: 161 pgs: 7 peering, 19 stale+active+clean, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:41.225 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:41 vm04 ceph-mon[96438]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T00:17:41.225 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:41 vm04 ceph-mon[94619]: pgmap v38: 161 pgs: 7 peering, 19 stale+active+clean, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:41.225 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:41 vm04 ceph-mon[94619]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T00:17:41.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:41 vm10 ceph-mon[82076]: pgmap v38: 161 pgs: 7 peering, 19 stale+active+clean, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:41.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:41 vm10 ceph-mon[82076]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/ln -snf /dev/ceph-8bbda548-d452-4ef7-a02f-dac5a013e067/osd-block-75d29058-61cd-44da-9ebc-7516b509075d /var/lib/ceph/osd/ceph-1/block 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 bash[105998]: Running command: /usr/bin/ln -snf /dev/ceph-8bbda548-d452-4ef7-a02f-dac5a013e067/osd-block-75d29058-61cd-44da-9ebc-7516b509075d /var/lib/ceph/osd/ceph-1/block 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 bash[105998]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 bash[105998]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T00:17:41.533 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T00:17:41.534 
INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 bash[105998]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T00:17:41.534 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate[106010]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-09T00:17:41.534 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 bash[105998]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-09T00:17:41.534 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 conmon[106010]: conmon 1e47c6b607f26add807d : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f.scope/container/memory.events 2026-03-09T00:17:41.534 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 podman[105998]: 2026-03-09 00:17:41.253667826 +0000 UTC m=+1.108279707 container died 1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-09T00:17:41.534 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 podman[105998]: 2026-03-09 00:17:41.374202566 +0000 UTC m=+1.228814436 container remove 1e47c6b607f26add807de987481686953eb19c2032eaa260e9b2f5c16e5d596f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-activate, ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:17:41.852 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 podman[106246]: 2026-03-09 00:17:41.530954232 +0000 UTC m=+0.058370695 container create 522cf40e592dc4a5808ef2a62ca73007729f265ac0cf472a5bc36247b3785861 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 
Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-09T00:17:41.852 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 podman[106246]: 2026-03-09 00:17:41.481959393 +0000 UTC m=+0.009375866 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:17:41.852 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 podman[106246]: 2026-03-09 00:17:41.588058968 +0000 UTC m=+0.115475431 container init 522cf40e592dc4a5808ef2a62ca73007729f265ac0cf472a5bc36247b3785861 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223) 2026-03-09T00:17:41.852 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 podman[106246]: 2026-03-09 00:17:41.592241867 +0000 UTC m=+0.119658330 container start 522cf40e592dc4a5808ef2a62ca73007729f265ac0cf472a5bc36247b3785861 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:17:41.852 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 bash[106246]: 522cf40e592dc4a5808ef2a62ca73007729f265ac0cf472a5bc36247b3785861 2026-03-09T00:17:41.852 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:41 vm04 systemd[1]: Started Ceph osd.1 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:17:42.518 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:42 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:17:42.177+0000 7f8a281ba740 -1 Falling back to public interface 2026-03-09T00:17:42.833 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.833 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.833 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.833 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:42.834 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:43.653 
INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:17:43.536+0000 7f8a281ba740 -1 osd.1 0 read_superblock omap replica is missing. 2026-03-09T00:17:43.653 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:43 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:17:43.595+0000 7f8a281ba740 -1 osd.1 102 log_to_monitors true 2026-03-09T00:17:43.950 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[94619]: pgmap v40: 161 pgs: 7 peering, 19 stale+active+clean, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:43.950 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:43.950 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[94619]: from='osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T00:17:43.950 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[94619]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T00:17:44.240 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:17:44 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:17:44.078+0000 7f8a1ff65640 -1 osd.1 102 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:17:44.240 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[96438]: pgmap v40: 161 pgs: 7 peering, 19 stale+active+clean, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:44.240 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:44.240 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[96438]: from='osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T00:17:44.240 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:43 vm04 ceph-mon[96438]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T00:17:44.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:43 vm10 ceph-mon[82076]: pgmap v40: 161 pgs: 7 peering, 19 stale+active+clean, 135 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:44.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:43 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:44.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:43 vm10 ceph-mon[82076]: from='osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T00:17:44.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:43 vm10 ceph-mon[82076]: from='osd.1 ' 
entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-09T00:17:44.605 INFO:teuthology.orchestra.run.vm04.stdout:true
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (14m) 0s ago 21m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (2m) 61s ago 20m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 0s ago 20m 51.5M - 3.5 e1d6a67b021e cdb4168e72eb
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (2m) 61s ago 22m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (11m) 0s ago 23m 551M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (68s) 0s ago 23m 49.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (104s) 61s ago 23m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (58s) 0s ago 23m 38.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345
2026-03-09T00:17:45.339 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (14m) 0s ago 21m 9995k - 1.7.0 72c9c2088986 38e0af6b2fbf
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (14m) 61s ago 21m 10.1M - 1.7.0 72c9c2088986 d059c0022310
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (27s) 0s ago 22m 68.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (3s) 0s ago 22m 15.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (22m) 0s ago 22m 53.7M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (22m) 0s ago 22m 56.7M 4096M 17.2.0 e1d6a67b021e d530f6e786d9
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (22m) 61s ago 22m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (22m) 61s ago 22m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (21m) 61s ago 21m 52.4M 4096M 17.2.0 e1d6a67b021e 168db5828111
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (21m) 61s ago 21m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 61s ago 21m 51.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f
2026-03-09T00:17:45.340 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (20m) 0s ago 20m 99.8M - 17.2.0 e1d6a67b021e a815abb0c790
2026-03-09T00:17:45.340
INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (20m) 61s ago 20m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: osdmap e105: 8 total, 7 up, 8 in 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: pgmap v42: 161 pgs: 37 active+undersized, 7 peering, 19 active+undersized+degraded, 98 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 94/723 objects degraded (13.001%) 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: from='osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: from='client.34244 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:45.340 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[96438]: Health check failed: Degraded data redundancy: 94/723 objects degraded (13.001%), 19 pgs degraded (PG_DEGRADED) 2026-03-09T00:17:45.344 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T00:17:45.344 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: osdmap e105: 8 total, 7 up, 8 in 2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: pgmap v42: 161 pgs: 37 active+undersized, 7 peering, 19 active+undersized+degraded, 98 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 94/723 objects degraded (13.001%) 2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: from='osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 
09 00:17:45 vm04 ceph-mon[94619]: from='client.34244 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y'
2026-03-09T00:17:45.345 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:45 vm04 ceph-mon[94619]: Health check failed: Degraded data redundancy: 94/723 objects degraded (13.001%), 19 pgs degraded (PG_DEGRADED)
2026-03-09T00:17:45.345 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:17:45] "GET /metrics HTTP/1.1" 200 37659 "" "Prometheus/2.51.0"
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: osdmap e105: 8 total, 7 up, 8 in
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: pgmap v42: 161 pgs: 37 active+undersized, 7 peering, 19 active+undersized+degraded, 98 active+clean; 457 KiB data, 126 MiB used, 160 GiB / 160 GiB avail; 94/723 objects degraded (13.001%)
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: from='osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y'
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: from='client.34244 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y'
2026-03-09T00:17:45.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:45 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 94/723 objects degraded (13.001%), 19 pgs degraded (PG_DEGRADED)
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "mon": {
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": {
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "osd": {
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6,
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": {
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "overall": {
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8,
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout: }
2026-03-09T00:17:45.827 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-03-09T00:17:46.147 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-03-09T00:17:46.147 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc",
2026-03-09T00:17:46.147 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true,
2026-03-09T00:17:46.147 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) osd. Upgrade limited to 2 daemons (0 remaining).",
2026-03-09T00:17:46.147 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [],
2026-03-09T00:17:46.147 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "2/8 daemons upgraded",
2026-03-09T00:17:46.148 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Currently upgrading osd daemons",
2026-03-09T00:17:46.148 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false
2026-03-09T00:17:46.148 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down)
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: Cluster is now healthy
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: from='client.54187 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996] boot
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: osdmap e106: 8 total, 8 up, 8 in
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: from='client.54193 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y'
2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/1533394717' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:46.734 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: from='client.54187 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996] boot 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: from='client.54193 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:46.735 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:46 vm04 ceph-mon[94619]: from='client.? 
192.168.123.104:0/1533394717' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:46.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:17:46.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:17:46.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: from='client.54187 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:46.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: osd.1 [v2:192.168.123.104:6810/3899700996,v1:192.168.123.104:6811/3899700996] boot 2026-03-09T00:17:46.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T00:17:46.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T00:17:46.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: from='client.54193 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:46.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:46.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:46.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:46 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/1533394717' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:47.008 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:47.006Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:47.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:47.008Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:47.622 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:47 vm04 ceph-mon[94619]: pgmap v44: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 101/723 objects degraded (13.970%) 2026-03-09T00:17:47.622 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:47 vm04 ceph-mon[94619]: from='client.54205 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:47.622 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:47 vm04 ceph-mon[94619]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T00:17:47.622 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:47 vm04 ceph-mon[96438]: pgmap v44: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 101/723 objects degraded (13.970%) 2026-03-09T00:17:47.622 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:47 vm04 ceph-mon[96438]: from='client.54205 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:47.622 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:47 vm04 ceph-mon[96438]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T00:17:47.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:47 vm10 ceph-mon[82076]: pgmap v44: 161 pgs: 42 active+undersized, 21 active+undersized+degraded, 98 active+clean; 457 KiB data, 144 MiB used, 160 GiB / 160 GiB avail; 101/723 objects degraded (13.970%) 2026-03-09T00:17:47.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:47 vm10 ceph-mon[82076]: from='client.54205 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:17:47.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:47 vm10 ceph-mon[82076]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: pgmap v46: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 84/723 objects degraded (11.618%) 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: Upgrade: Finalizing container_image settings 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: Upgrade: Complete! 
2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:17:49.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: pgmap v46: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 84/723 objects degraded (11.618%) 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 
ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: Upgrade: Finalizing container_image settings 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 
09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: Upgrade: Complete! 
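The burst of "config rm ... container_image" commands dispatched by mgr.y above is the upgrade finalization visible in this log: cephadm clears the per-daemon-type container_image overrides it set during the staggered upgrade and then deletes its persisted mgr/cephadm/upgrade_state key before "Upgrade: Complete!" is reported. A minimal sketch of the equivalent CLI calls, assuming an admin cephadm shell (the daemon-type list mirrors the mon log entries above; this is not the authoritative cephadm code path):

    # Drop the per-daemon-type image overrides set while the upgrade was running.
    for who in mgr mon osd mds client.crash client.rgw client.rbd-mirror \
               client.ceph-exporter client.iscsi client.nfs client.nvmeof; do
        ceph config rm "$who" container_image
    done
    # Discard the persisted upgrade state once finalization is done.
    ceph config-key del mgr/cephadm/upgrade_state
    # Confirm the reported daemon versions afterwards.
    ceph versions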
2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:49.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:49 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: pgmap v46: 161 pgs: 33 active+undersized, 14 active+undersized+degraded, 114 active+clean; 457 KiB data, 145 MiB used, 160 GiB / 160 GiB avail; 84/723 objects degraded (11.618%) 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: Upgrade: Finalizing container_image settings 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 
09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: Upgrade: Complete! 
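mon.b on vm10 echoes the same finalization. The remainder of this excerpt shows the test's per-step pattern for the staggered upgrade: verify the previous step's result with jq assertions against "ceph versions" and "ceph orch upgrade check", then start the next step (here --daemon-types crash,osd --limit 1) and poll "ceph orch upgrade status" until it completes or reports an error. A readable sketch of that pattern, assuming $sha1 holds the target build SHA as it does in the cephadm shell commands below:

    # Verify the previous step (osd --limit 2): exactly two distinct OSD versions
    # reported, and seven daemons already on the target image.
    ceph versions | jq -e '.osd | length == 2'
    ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7'

    # Start the next step and poll until it finishes or errors (reflowed form of
    # the one-line loop the test runs via cephadm shell).
    ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 \
        --daemon-types crash,osd --limit 1
    while ceph orch upgrade status | jq '.in_progress' | grep true && \
          ! ceph orch upgrade status | jq '.message' | grep Error; do
        ceph orch ps; ceph versions; ceph orch upgrade status; sleep 30
    done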
2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:49.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:49 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:50.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:50 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:50.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[96438]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:17:50.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:50 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:51 vm10 ceph-mon[82076]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T00:17:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:51 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 84/723 objects degraded (11.618%), 14 pgs degraded) 2026-03-09T00:17:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:51 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:17:51.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:51 vm04 ceph-mon[94619]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T00:17:51.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:51 vm04 ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 84/723 objects degraded (11.618%), 14 pgs degraded) 2026-03-09T00:17:51.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:51 vm04 ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:17:51.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:51 vm04 ceph-mon[96438]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T00:17:51.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:51 vm04 ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 84/723 objects degraded (11.618%), 14 pgs degraded) 2026-03-09T00:17:51.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:51 vm04 ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:17:53.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:53 vm04 ceph-mon[94619]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:53.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:53.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:53 vm04 ceph-mon[96438]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:53.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:53 vm10 ceph-mon[82076]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:17:53.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:54.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:54 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:54.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:54 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:54.828 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:54 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:17:55.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:17:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:17:55] "GET /metrics HTTP/1.1" 200 37659 "" "Prometheus/2.51.0" 2026-03-09T00:17:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:55 vm10 ceph-mon[82076]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:17:55.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:55 vm04 ceph-mon[94619]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:17:55.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:55 vm04 ceph-mon[96438]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:17:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:57.008Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:17:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:17:57.008Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:17:58.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:57 vm10 ceph-mon[82076]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:58.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:57 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:58.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:57 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:58.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:57 vm04 ceph-mon[96438]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:58.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:57 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:58.109 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:57 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:58.109 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:57 vm04 ceph-mon[94619]: pgmap v50: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:17:58.109 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:57 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:17:58.109 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:57 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:17:59.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:17:58 vm10 ceph-mon[82076]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 881 B/s rd, 0 op/s 2026-03-09T00:17:59.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:17:58 vm04 ceph-mon[94619]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 881 B/s rd, 0 op/s 2026-03-09T00:17:59.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:17:58 vm04 ceph-mon[96438]: pgmap v51: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 881 B/s rd, 0 op/s 2026-03-09T00:18:01.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:01 vm10 ceph-mon[82076]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:01.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:01 vm04 ceph-mon[94619]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:01.606 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:01 vm04 ceph-mon[96438]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:03.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:03 vm10 ceph-mon[82076]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:03.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:03 vm04 ceph-mon[96438]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:03.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:03 vm04 ceph-mon[94619]: pgmap v53: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:04.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:04 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:04.502 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:04 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:04.149Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:04.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:04 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:04.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:04 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:05.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:18:05 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:18:05] "GET /metrics HTTP/1.1" 200 37657 "" "Prometheus/2.51.0" 2026-03-09T00:18:05.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:05 vm04 ceph-mon[96438]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:05.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:05 vm04 ceph-mon[94619]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:05.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:05 vm10 ceph-mon[82076]: pgmap v54: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:07.274 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:07.008Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:07.274 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:07.009Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:07.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:07 vm10 ceph-mon[82076]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:07.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:07 vm04 ceph-mon[96438]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:07.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:07 vm04 ceph-mon[94619]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:09.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:09 vm10 ceph-mon[82076]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:09.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:09 vm04 ceph-mon[96438]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:09.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:09 vm04 ceph-mon[94619]: pgmap v56: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:11 vm10 ceph-mon[82076]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:11.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:11 vm04 ceph-mon[96438]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:11.600 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:11 vm04 ceph-mon[94619]: pgmap v57: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:12.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:12.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:12.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:13 vm10 ceph-mon[82076]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:13.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:13 vm04 ceph-mon[96438]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:13 vm04 ceph-mon[94619]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:14.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:14 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:14.600 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:14.149Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:14.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:14 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:14.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:14 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:15.184 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:18:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:18:15] "GET /metrics HTTP/1.1" 200 37671 "" "Prometheus/2.51.0" 2026-03-09T00:18:15.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:15 vm10 ceph-mon[82076]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:15.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:15 vm04 ceph-mon[96438]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:15.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:15 vm04 ceph-mon[94619]: pgmap v59: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 
KiB/s rd, 1 op/s 2026-03-09T00:18:16.432 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (15m) 32s ago 21m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (2m) 93s ago 21m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 32s ago 21m 51.5M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (2m) 93s ago 23m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (12m) 32s ago 24m 551M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (99s) 32s ago 24m 49.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (2m) 93s ago 23m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (90s) 32s ago 23m 38.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (15m) 32s ago 21m 9995k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (14m) 93s ago 21m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (59s) 32s ago 23m 68.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (35s) 32s ago 23m 15.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (23m) 32s ago 23m 53.7M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (22m) 32s ago 22m 56.7M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (22m) 93s ago 22m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (22m) 93s ago 22m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (22m) 93s ago 22m 52.4M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (22m) 93s ago 22m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (2m) 93s ago 21m 51.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:18:16.898 
INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (21m) 32s ago 21m 99.8M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:18:16.898 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (21m) 93s ago 21m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:18:16.948 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 2'"'"'' 2026-03-09T00:18:17.149 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:17.009Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:17.149 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:17.010Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:17.484 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:18:17.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:17 vm04 ceph-mon[96438]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:17.485 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:17 vm04 ceph-mon[96438]: from='client.34259 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:17.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:17 vm04 ceph-mon[94619]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:17.485 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:17 vm04 ceph-mon[94619]: from='client.34259 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:17.539 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 7'"'"'' 2026-03-09T00:18:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:17 vm10 ceph-mon[82076]: pgmap v60: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:17 vm10 ceph-mon[82076]: from='client.34259 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:18 vm10 ceph-mon[82076]: 
from='client.44220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:18.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:18 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/2535291511' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:18 vm04 ceph-mon[96438]: from='client.44220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:18.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:18 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/2535291511' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:18 vm04 ceph-mon[94619]: from='client.44220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:18.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:18 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/2535291511' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:19.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:19 vm04 ceph-mon[94619]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:19.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:19 vm04 ceph-mon[94619]: from='client.44232 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:19.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:19 vm04 ceph-mon[96438]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:19.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:19 vm04 ceph-mon[96438]: from='client.44232 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:19.763 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:18:19.801 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:18:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:19 vm10 ceph-mon[82076]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:19.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:19 vm10 ceph-mon[82076]: from='client.44232 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:18:20.308 
INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:18:20.308 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:18:21.085 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:18:21.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:21 vm04 ceph-mon[94619]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:21.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:21 vm04 ceph-mon[94619]: from='client.44238 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:21.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:21 vm04 ceph-mon[96438]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:21.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:21 vm04 ceph-mon[96438]: from='client.44238 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:22.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:21 vm10 ceph-mon[82076]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:22.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:21 vm10 ceph-mon[82076]: from='client.44238 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:22.461 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:18:23.038 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1' 2026-03-09T00:18:23.315 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:23 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/3980421721' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:18:23.316 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:23 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/3980421721' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:18:23.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:23 vm10 ceph-mon[82076]: from='client.? 
192.168.123.104:0/3980421721' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:18:24.256 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:24 vm04 ceph-mon[94619]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:24.256 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:24 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:24.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:24 vm10 ceph-mon[82076]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:24.579 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:24 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:24.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:24 vm04 ceph-mon[96438]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:24.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:24 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:25.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:18:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:18:25] "GET /metrics HTTP/1.1" 200 37671 "" "Prometheus/2.51.0" 2026-03-09T00:18:25.737 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:25 vm04 ceph-mon[94619]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:25.737 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:25 vm04 ceph-mon[94619]: from='client.44250 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:25.737 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:25 vm04 ceph-mon[96438]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:25.737 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:25 vm04 ceph-mon[96438]: from='client.44250 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:25 vm10 ceph-mon[82076]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:25 vm10 ceph-mon[82076]: from='client.44250 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "limit": 1, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:26.848 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:27.013 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 
09 00:18:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:27.010Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:27.013 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:27.012Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:27.278 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:27.804 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 
09 00:18:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:28.057 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:28.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:28.835 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:18:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:29 vm10 ceph-mon[82076]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:29 vm10 ceph-mon[82076]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:29.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:29 vm04 ceph-mon[94619]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:29.349 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:29 vm04 ceph-mon[94619]: 
pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:29.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:29 vm04 ceph-mon[96438]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:18:29.349 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:29 vm04 ceph-mon[96438]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (15m) 44s ago 21m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (2m) 105s ago 21m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (2m) 44s ago 21m 51.5M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (3m) 105s ago 23m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (12m) 44s ago 24m 551M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (112s) 44s ago 24m 49.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (2m) 105s ago 23m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (102s) 44s ago 23m 38.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (15m) 44s ago 22m 9995k - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (15m) 105s ago 22m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (71s) 44s ago 23m 68.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (47s) 44s ago 23m 15.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (23m) 44s ago 23m 53.7M 4096M 17.2.0 e1d6a67b021e a4ed5ecab7e4 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (23m) 44s ago 23m 56.7M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (22m) 105s ago 22m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (22m) 105s ago 22m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (22m) 105s ago 22m 52.4M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (22m) 105s ago 22m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:18:29.404 
INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (3m) 105s ago 21m 51.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (21m) 44s ago 21m 99.8M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:18:29.404 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (21m) 105s ago 21m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6, 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8, 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:18:29.734 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:18:29.983 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:18:29.983 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", 2026-03-09T00:18:29.983 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:18:29.984 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) crash,osd. 
Upgrade limited to 1 daemons (1 remaining).", 2026-03-09T00:18:29.984 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:18:29.984 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "2/8 daemons upgraded", 2026-03-09T00:18:29.984 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:18:29.984 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:18:29.984 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='client.44256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='client.34286 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='client.44265 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mgr 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='client.? 
192.168.123.104:0/1808777675' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mon 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='client.44256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='client.34286 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='client.44265 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mgr 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='client.? 
192.168.123.104:0/1808777675' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mon 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:30.989 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:30 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='client.44256 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='client.34286 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='client.44265 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mgr 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='client.? 
192.168.123.104:0/1808777675' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mon 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:31.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:30 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T00:18:31.987 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all crash 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: from='client.44274 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: Upgrade: osd.2 is safe to restart 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all crash 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: from='client.44274 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: Upgrade: osd.2 is safe to restart 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T00:18:31.988 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all crash 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: from='client.44274 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: Upgrade: osd.2 is safe to restart 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T00:18:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:32.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:32 vm04 systemd[1]: Stopping Ceph osd.2 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:18:33.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:33 vm04 ceph-mon[96438]: Upgrade: Updating osd.2 2026-03-09T00:18:33.353 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:33 vm04 ceph-mon[96438]: Deploying daemon osd.2 on vm04 2026-03-09T00:18:33.353 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:32 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[59555]: 2026-03-09T00:18:32.911+0000 7ff5d5667700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:18:33.353 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:32 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[59555]: 2026-03-09T00:18:32.911+0000 7ff5d5667700 -1 osd.2 107 *** Got signal Terminated *** 2026-03-09T00:18:33.353 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:32 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[59555]: 2026-03-09T00:18:32.911+0000 7ff5d5667700 -1 osd.2 107 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:18:33.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:33 vm04 ceph-mon[94619]: Upgrade: Updating osd.2 2026-03-09T00:18:33.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:33 vm04 ceph-mon[94619]: Deploying daemon osd.2 on vm04 2026-03-09T00:18:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:33 vm10 ceph-mon[82076]: Upgrade: Updating osd.2 2026-03-09T00:18:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:33 vm10 ceph-mon[82076]: Deploying daemon osd.2 on vm04 2026-03-09T00:18:34.325 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111475]: 2026-03-09 00:18:34.053457114 +0000 UTC m=+1.218379869 container died a4ed5ecab7e44f05ddced06028c84fc249101647b7afab8a0051e55c715d244b (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, name=centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , RELEASE=HEAD, architecture=x86_64, io.buildah.version=1.19.8, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.component=centos-stream-container, ceph=True, distribution-scope=public, version=8, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[96438]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[96438]: osd.2 marked itself down and dead 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[96438]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[94619]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[94619]: osd.2 marked itself down and dead 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:18:34.325 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:34 vm04 ceph-mon[94619]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T00:18:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:34 vm10 ceph-mon[82076]: pgmap v68: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:34 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:34 vm10 ceph-mon[82076]: osd.2 marked itself down and dead 2026-03-09T00:18:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:34 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:18:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:34 vm10 ceph-mon[82076]: osdmap e108: 8 total, 7 up, 
8 in 2026-03-09T00:18:34.583 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111475]: 2026-03-09 00:18:34.324257743 +0000 UTC m=+1.489180498 container remove a4ed5ecab7e44f05ddced06028c84fc249101647b7afab8a0051e55c715d244b (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, distribution-scope=public, CEPH_POINT_RELEASE=-17.2.0, com.redhat.component=centos-stream-container, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64, name=centos-stream, RELEASE=HEAD, release=754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8) 2026-03-09T00:18:34.583 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 bash[111475]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2 2026-03-09T00:18:34.583 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.520153924 +0000 UTC m=+0.046721886 container create 89bfc5f88e1970f625298452e1f42b32243ab028c230d97b96f2221c73b52412 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:18:34.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.484639934 +0000 UTC m=+0.011207906 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:18:34.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.651715681 +0000 UTC 
m=+0.178283652 container init 89bfc5f88e1970f625298452e1f42b32243ab028c230d97b96f2221c73b52412 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:18:34.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.655317382 +0000 UTC m=+0.181885344 container start 89bfc5f88e1970f625298452e1f42b32243ab028c230d97b96f2221c73b52412 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid, OSD_FLAVOR=default) 2026-03-09T00:18:34.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.71096973 +0000 UTC m=+0.237537692 container attach 89bfc5f88e1970f625298452e1f42b32243ab028c230d97b96f2221c73b52412 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223) 2026-03-09T00:18:34.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.809117745 +0000 UTC m=+0.335685707 container died 89bfc5f88e1970f625298452e1f42b32243ab028c230d97b96f2221c73b52412 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:18:35.234 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 podman[111540]: 2026-03-09 00:18:34.981385117 +0000 UTC m=+0.507953079 container remove 89bfc5f88e1970f625298452e1f42b32243ab028c230d97b96f2221c73b52412 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True) 2026-03-09T00:18:35.234 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2.service: Deactivated successfully. 2026-03-09T00:18:35.234 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2.service: Unit process 111552 (conmon) remains running after unit stopped. 2026-03-09T00:18:35.234 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2.service: Unit process 111560 (podman) remains running after unit stopped. 2026-03-09T00:18:35.234 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 systemd[1]: Stopped Ceph osd.2 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:18:35.234 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:34 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2.service: Consumed 8.422s CPU time, 149.9M memory peak. 2026-03-09T00:18:35.235 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:18:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:18:35] "GET /metrics HTTP/1.1" 200 37668 "" "Prometheus/2.51.0" 2026-03-09T00:18:35.487 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 systemd[1]: Starting Ceph osd.2 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
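[annotation] The records above show the staggered-upgrade gate for OSDs: the active mgr asks the monitors "osd ok-to-stop" for id 2, stops osd.2 only after "osd.2 is safe to restart", removes the 17.2.0 container, and runs a short-lived osd-2-deactivate container from the target image before systemd starts the daemon again. A minimal sketch of the same gated redeploy done by hand from a cephadm shell (the osd id, --max value and image are taken from this run; the wrapper itself is illustrative, not what the test executes):

  # only touch osd.2 if the monitors say it can be stopped without losing availability
  if ceph osd ok-to-stop 2 --max 16 >/dev/null 2>&1; then
      ceph orch daemon redeploy osd.2 --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
  else
      echo "osd.2 not safe to stop yet" >&2
  fi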
2026-03-09T00:18:35.487 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 podman[111642]: 2026-03-09 00:18:35.421453225 +0000 UTC m=+0.078937024 container create 3175755171b07147e52fa9e56d5fe8237324540531e2cf80c37e0bcf06845e5a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:18:35.487 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 podman[111642]: 2026-03-09 00:18:35.35352846 +0000 UTC m=+0.011012259 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:18:35.487 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:35 vm04 ceph-mon[96438]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:35.487 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:35 vm04 ceph-mon[96438]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T00:18:35.487 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:35 vm04 ceph-mon[94619]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:35.487 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:35 vm04 ceph-mon[94619]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T00:18:35.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:35 vm10 ceph-mon[82076]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:35.831 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:35 vm10 ceph-mon[82076]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 podman[111642]: 2026-03-09 00:18:35.528536131 +0000 UTC m=+0.186019940 container init 3175755171b07147e52fa9e56d5fe8237324540531e2cf80c37e0bcf06845e5a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 podman[111642]: 
2026-03-09 00:18:35.534534157 +0000 UTC m=+0.192017956 container start 3175755171b07147e52fa9e56d5fe8237324540531e2cf80c37e0bcf06845e5a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 podman[111642]: 2026-03-09 00:18:35.610900269 +0000 UTC m=+0.268384068 container attach 3175755171b07147e52fa9e56d5fe8237324540531e2cf80c37e0bcf06845e5a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 bash[111642]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:35.851 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:35 vm04 bash[111642]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-e406ab34-aa5e-46d7-8ed1-35c045804f1a/osd-block-53adb85c-2242-4b5e-a3ed-dfb1b448b743 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T00:18:36.480 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-e406ab34-aa5e-46d7-8ed1-35c045804f1a/osd-block-53adb85c-2242-4b5e-a3ed-dfb1b448b743 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T00:18:36.812 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/ln -snf /dev/ceph-e406ab34-aa5e-46d7-8ed1-35c045804f1a/osd-block-53adb85c-2242-4b5e-a3ed-dfb1b448b743 /var/lib/ceph/osd/ceph-2/block 2026-03-09T00:18:36.812 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T00:18:36.812 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/ln -snf /dev/ceph-e406ab34-aa5e-46d7-8ed1-35c045804f1a/osd-block-53adb85c-2242-4b5e-a3ed-dfb1b448b743 /var/lib/ceph/osd/ceph-2/block 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate[111653]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 bash[111642]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T00:18:36.813 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 podman[111642]: 2026-03-09 
00:18:36.596616457 +0000 UTC m=+1.254100256 container died 3175755171b07147e52fa9e56d5fe8237324540531e2cf80c37e0bcf06845e5a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS) 2026-03-09T00:18:36.813 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:36 vm04 ceph-mon[96438]: Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY) 2026-03-09T00:18:36.813 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:36 vm04 ceph-mon[94619]: Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY) 2026-03-09T00:18:36.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:36 vm10 ceph-mon[82076]: Health check failed: Reduced data availability: 1 pg inactive, 1 pg peering (PG_AVAILABILITY) 2026-03-09T00:18:37.073 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:37.010Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:37.073 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:37.011Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:37.073 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 podman[111642]: 2026-03-09 00:18:36.836288953 +0000 UTC m=+1.493772752 container remove 3175755171b07147e52fa9e56d5fe8237324540531e2cf80c37e0bcf06845e5a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 
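[annotation] At this point ceph-volume has activated osd ID 2 on the new image and the osd-2-activate helper container has been removed; the records that follow create and start the long-lived osd.2 container. A few spot checks that the redeployed daemon landed on the target build, using the same CLI primitives this job relies on (illustrative, run from a cephadm shell):

  ceph orch ps --refresh
  ceph versions | jq '.osd'
  ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df | jq '.up_to_date'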
2026-03-09T00:18:37.073 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:36 vm04 podman[111910]: 2026-03-09 00:18:36.997309378 +0000 UTC m=+0.052313021 container create 69a18f90367fdb6b22d2e8a15c29c25399e3ed450c3f98d6689baec8f824481f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, OSD_FLAVOR=default) 2026-03-09T00:18:37.073 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:37 vm04 podman[111910]: 2026-03-09 00:18:36.956524423 +0000 UTC m=+0.011528076 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:18:37.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:37 vm04 podman[111910]: 2026-03-09 00:18:37.122123576 +0000 UTC m=+0.177127219 container init 69a18f90367fdb6b22d2e8a15c29c25399e3ed450c3f98d6689baec8f824481f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, ceph=True, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-09T00:18:37.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:37 vm04 podman[111910]: 2026-03-09 00:18:37.129511923 +0000 UTC m=+0.184515566 container start 69a18f90367fdb6b22d2e8a15c29c25399e3ed450c3f98d6689baec8f824481f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.license=GPLv2) 2026-03-09T00:18:37.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:37 vm04 bash[111910]: 
69a18f90367fdb6b22d2e8a15c29c25399e3ed450c3f98d6689baec8f824481f 2026-03-09T00:18:37.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:37 vm04 systemd[1]: Started Ceph osd.2 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:18:38.030 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:37 vm04 ceph-mon[94619]: pgmap v72: 161 pgs: 7 peering, 12 stale+active+clean, 142 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:18:38.030 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:37 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.030 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:37 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.030 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:37 vm04 ceph-mon[96438]: pgmap v72: 161 pgs: 7 peering, 12 stale+active+clean, 142 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:18:38.030 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:37 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.030 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:37 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.031 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:18:37.745+0000 7feb324ca740 -1 Falling back to public interface 2026-03-09T00:18:38.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:37 vm10 ceph-mon[82076]: pgmap v72: 161 pgs: 7 peering, 12 stale+active+clean, 142 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:18:38.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:37 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:37 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.978 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.978 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:38.978 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:38.978 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:39.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:39.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:18:39.036+0000 7feb324ca740 -1 osd.2 0 read_superblock omap replica is missing. 
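[annotation] The "Failed to activate via raw: did not find any matching OSD to activate" line from the activate container appears benign here: the generic activate path tries raw-mode OSDs first and falls back to LVM, and the subsequent "ceph-volume lvm activate successful for osd ID: 2" shows the LVM path was taken. The single "read_superblock omap replica is missing" message is emitted while the upgraded osd.2 opens the store created under 17.2.0, and the daemon continues to start normally afterwards (log_to_monitors, crush set-device-class in the next records). To confirm how osd.2 is backed (illustrative, run on vm04):

  sudo cephadm ceph-volume -- lvm list 2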
2026-03-09T00:18:39.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:39 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:18:39.078+0000 7feb324ca740 -1 osd.2 107 log_to_monitors true 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[94619]: pgmap v73: 161 pgs: 19 active+undersized, 7 peering, 3 stale+active+clean, 11 active+undersized+degraded, 121 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 39/723 objects degraded (5.394%) 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[94619]: Health check failed: Degraded data redundancy: 39/723 objects degraded (5.394%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[94619]: from='osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[94619]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[96438]: pgmap v73: 161 pgs: 19 active+undersized, 7 peering, 3 stale+active+clean, 11 active+undersized+degraded, 121 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 39/723 objects degraded (5.394%) 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[96438]: Health check failed: Degraded data redundancy: 39/723 objects degraded (5.394%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[96438]: from='osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T00:18:39.916 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:39 vm04 ceph-mon[96438]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T00:18:40.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:39 vm10 ceph-mon[82076]: pgmap v73: 161 pgs: 19 active+undersized, 7 peering, 3 stale+active+clean, 11 active+undersized+degraded, 121 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 39/723 objects degraded (5.394%) 2026-03-09T00:18:40.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:39 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 39/723 objects degraded (5.394%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T00:18:40.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:39 vm10 ceph-mon[82076]: from='osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T00:18:40.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:39 vm10 ceph-mon[82076]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T00:18:40.351 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:18:40 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:18:40.079+0000 7feb2a275640 -1 osd.2 107 set_numa_affinity unable to identify public interface '' 
numa node: (2) No such file or directory 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: pgmap v74: 161 pgs: 31 active+undersized, 17 active+undersized+degraded, 113 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 70/723 objects degraded (9.682%) 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 1 pg peering) 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: from='osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:41 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:41.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: pgmap v74: 161 pgs: 31 active+undersized, 17 active+undersized+degraded, 113 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 70/723 objects degraded (9.682%) 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 1 pg peering) 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: from='osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:41.609 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: pgmap v74: 161 pgs: 31 active+undersized, 17 active+undersized+degraded, 113 active+clean; 457 KiB data, 146 MiB used, 160 GiB / 160 GiB avail; 70/723 objects degraded (9.682%) 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg inactive, 1 pg peering) 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: from='osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:41.609 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:41 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.289 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900] boot 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.290 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:42.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900] boot 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.581 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: osd.2 [v2:192.168.123.104:6818/1572793900,v1:192.168.123.104:6819/1572793900] boot 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:42.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: pgmap v77: 161 pgs: 31 active+undersized, 17 active+undersized+degraded, 113 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 70/723 objects degraded (9.682%) 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:44.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:18:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:43 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: pgmap v77: 161 pgs: 31 active+undersized, 17 active+undersized+degraded, 113 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 70/723 objects degraded (9.682%) 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:44.101 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", 
"name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:18:44.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: pgmap v77: 161 pgs: 31 active+undersized, 17 active+undersized+degraded, 113 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 70/723 objects degraded (9.682%) 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:44.102 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", 
"name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:18:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:43 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: Upgrade: Finalizing container_image settings 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": 
"mon"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: Upgrade: Complete! 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:45.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:44 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:45.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:18:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:18:45] "GET /metrics HTTP/1.1" 200 37626 "" "Prometheus/2.51.0" 2026-03-09T00:18:45.362 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: Upgrade: Finalizing container_image settings 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: Upgrade: Complete! 
2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:45.362 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: Upgrade: Finalizing container_image settings 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: Upgrade: Complete! 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:18:45.363 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:44 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:46 vm10 ceph-mon[82076]: pgmap v79: 161 pgs: 3 activating, 21 peering, 18 active+undersized, 6 active+undersized+degraded, 113 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 33/723 objects degraded (4.564%) 2026-03-09T00:18:46.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:46 vm10 ceph-mon[82076]: Health check update: Degraded data redundancy: 33/723 
objects degraded (4.564%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T00:18:46.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:46 vm04 ceph-mon[96438]: pgmap v79: 161 pgs: 3 activating, 21 peering, 18 active+undersized, 6 active+undersized+degraded, 113 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 33/723 objects degraded (4.564%) 2026-03-09T00:18:46.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:46 vm04 ceph-mon[96438]: Health check update: Degraded data redundancy: 33/723 objects degraded (4.564%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T00:18:46.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:46 vm04 ceph-mon[94619]: pgmap v79: 161 pgs: 3 activating, 21 peering, 18 active+undersized, 6 active+undersized+degraded, 113 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 33/723 objects degraded (4.564%) 2026-03-09T00:18:46.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:46 vm04 ceph-mon[94619]: Health check update: Degraded data redundancy: 33/723 objects degraded (4.564%), 6 pgs degraded (PG_DEGRADED) 2026-03-09T00:18:47.301 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:47.011Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:47.301 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:47.012Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:47.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:47 vm10 ceph-mon[82076]: pgmap v80: 161 pgs: 3 activating, 21 peering, 137 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:47.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:47 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 33/723 objects degraded (4.564%), 6 pgs degraded) 2026-03-09T00:18:47.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:47 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:18:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:47 vm04 ceph-mon[96438]: pgmap v80: 161 pgs: 3 activating, 21 peering, 137 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:47.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:47 vm04 ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 33/723 objects degraded (4.564%), 6 pgs degraded) 2026-03-09T00:18:47.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:47 vm04 ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:18:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:47 vm04 ceph-mon[94619]: pgmap v80: 161 pgs: 3 activating, 21 peering, 137 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 
00:18:47 vm04 ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 33/723 objects degraded (4.564%), 6 pgs degraded) 2026-03-09T00:18:47.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:47 vm04 ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:18:48.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:48 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:48.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:48 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:48.869 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:48 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:49.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:49 vm10 ceph-mon[82076]: pgmap v81: 161 pgs: 3 activating, 158 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:18:49.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:49 vm04 ceph-mon[96438]: pgmap v81: 161 pgs: 3 activating, 158 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:18:49.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:49 vm04 ceph-mon[94619]: pgmap v81: 161 pgs: 3 activating, 158 active+clean; 457 KiB data, 165 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:18:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:51 vm10 ceph-mon[82076]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:51.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:51 vm04 ceph-mon[96438]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:51.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:51 vm04 ceph-mon[94619]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:53.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:53 vm10 ceph-mon[82076]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:18:53.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:53 vm04 ceph-mon[96438]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:18:53.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:53 vm04 ceph-mon[94619]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:18:54.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:54 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:54.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:54 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:54.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:54 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:18:55.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:18:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - 
[09/Mar/2026:00:18:55] "GET /metrics HTTP/1.1" 200 37626 "" "Prometheus/2.51.0" 2026-03-09T00:18:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:55 vm10 ceph-mon[82076]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:18:55.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:55 vm04 ceph-mon[96438]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:18:55.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:55 vm04 ceph-mon[94619]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:18:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:57.012Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:57.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:18:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:18:57.013Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:18:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:57 vm10 ceph-mon[82076]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:57 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:57 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:57 vm04 ceph-mon[96438]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:57 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:57 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:57 vm04 ceph-mon[94619]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:18:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:57 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:18:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:57 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:18:59.850 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:18:59 vm04 ceph-mon[96438]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:18:59.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:18:59 vm04 ceph-mon[94619]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:00.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:18:59 vm10 ceph-mon[82076]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:00.280 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (15m) 20s ago 22m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (3m) 2m ago 22m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (3m) 20s ago 22m 51.7M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (3m) 2m ago 24m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (12m) 20s ago 24m 556M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (2m) 20s ago 25m 52.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (3m) 2m ago 24m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (2m) 20s ago 24m 40.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (15m) 20s ago 22m 9.77M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (15m) 2m ago 22m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (103s) 20s ago 24m 69.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:19:00.848 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (79s) 20s ago 23m 48.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (23s) 20s ago 23m 14.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (23m) 20s ago 23m 58.0M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (23m) 2m ago 23m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:19:00.849 
INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (23m) 2m ago 23m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (23m) 2m ago 23m 52.4M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (22m) 2m ago 22m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (3m) 2m ago 22m 51.3M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (22m) 20s ago 22m 100M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:19:00.849 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (22m) 2m ago 22m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:19:00.899 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 2'"'"'' 2026-03-09T00:19:01.435 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:19:01.494 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '"'"'.up_to_date | length == 8'"'"'' 2026-03-09T00:19:01.693 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:01 vm04 ceph-mon[96438]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:01.693 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:01 vm04 ceph-mon[96438]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:01.693 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:01 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/3202874614' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:01.696 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:01 vm04 ceph-mon[94619]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:01.696 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:01 vm04 ceph-mon[94619]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:01.696 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:01 vm04 ceph-mon[94619]: from='client.? 
192.168.123.104:0/3202874614' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:02.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:01 vm10 ceph-mon[82076]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:02.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:01 vm10 ceph-mon[82076]: from='client.44283 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:02.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:01 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/3202874614' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:02.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:02 vm04 ceph-mon[96438]: from='client.34316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:02.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:02 vm04 ceph-mon[94619]: from='client.34316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:03.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:02 vm10 ceph-mon[82076]: from='client.34316 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:03.586 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:19:03.633 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:03 vm04 ceph-mon[94619]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:03.633 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:03 vm04 ceph-mon[94619]: from='client.54280 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:03.633 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:03 vm04 ceph-mon[96438]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:03.633 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:03 vm04 ceph-mon[96438]: from='client.54280 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:03.649 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:19:04.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:03 vm10 ceph-mon[82076]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:04.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:03 vm10 ceph-mon[82076]: from='client.54280 -' entity='client.admin' cmd=[{"prefix": "orch upgrade check", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:04.159 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:19:04.159 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:19:04.159 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 
2026-03-09T00:19:04.159 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:19:04.159 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:19:04.159 INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:19:04.160 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:19:04.160 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:19:04.160 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:19:04.210 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:19:04.722 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:19:04.722 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:04 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:04.722 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:04 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:04.784 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd' 2026-03-09T00:19:04.979 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:04 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:05.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:05 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:19:05] "GET /metrics HTTP/1.1" 200 37756 "" "Prometheus/2.51.0" 2026-03-09T00:19:06.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:05 vm10 ceph-mon[82076]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:06.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:05 vm10 ceph-mon[82076]: from='client.54286 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:06.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:05 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/3503691045' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:19:06.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:05 vm04 ceph-mon[94619]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:06.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:05 vm04 ceph-mon[94619]: from='client.54286 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:06.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:05 vm04 ceph-mon[94619]: from='client.? 
192.168.123.104:0/3503691045' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:19:06.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:05 vm04 ceph-mon[96438]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:06.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:05 vm04 ceph-mon[96438]: from='client.54286 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:06.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:05 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/3503691045' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:19:06.841 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:06.869 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:06 vm04 ceph-mon[94619]: from='client.34334 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:06.870 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:06 vm04 ceph-mon[96438]: from='client.34334 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:06.900 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! 
ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:19:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:06 vm10 ceph-mon[82076]: from='client.34334 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "daemon_types": "crash,osd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:07.127 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:07.013Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:07.127 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:07.014Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:07.474 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[94619]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[96438]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:07 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:07.889 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (15m) 27s ago 22m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (3m) 2m ago 22m 78.5M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (3m) 27s ago 22m 51.7M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (3m) 2m ago 24m 485M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (13m) 27s ago 25m 556M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (2m) 27s ago 25m 52.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (3m) 2m ago 24m 41.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (2m) 27s ago 24m 40.3M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (15m) 27s ago 22m 9.77M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (15m) 2m ago 22m 10.1M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (110s) 27s ago 24m 69.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (86s) 27s ago 24m 48.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (30s) 27s ago 23m 14.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (23m) 27s ago 23m 58.0M 4096M 17.2.0 e1d6a67b021e d530f6e786d9 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (23m) 2m ago 23m 55.0M 4096M 17.2.0 e1d6a67b021e ad302e6f363c 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (23m) 2m ago 23m 53.8M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (23m) 2m ago 23m 52.4M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (23m) 2m ago 23m 57.2M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (3m) 2m ago 22m 51.3M - 2.51.0 
1d3b7f56885b 1f53121cfa7f 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (22m) 27s ago 22m 100M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:19:07.890 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (22m) 2m ago 22m 97.3M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:19:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:07 vm10 ceph-mon[82076]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:07 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:07 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:07 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:07 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:07 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:08.128 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:19:08.128 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:19:08.128 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:19:08.128 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 7, 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:19:08.129 INFO:teuthology.orchestra.run.vm04.stdout:} 
2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:19:08.332 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:19:08.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[94619]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[94619]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[94619]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[94619]: from='client.44328 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:08.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/1410249188' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:08.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[96438]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:08.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[96438]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:08.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[96438]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:08.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[96438]: from='client.44328 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:08.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:08 vm04 ceph-mon[96438]: from='client.? 
192.168.123.104:0/1410249188' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:08 vm10 ceph-mon[82076]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:08 vm10 ceph-mon[82076]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:19:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:08 vm10 ceph-mon[82076]: from='client.54304 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:08 vm10 ceph-mon[82076]: from='client.44328 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:09.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:08 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/1410249188' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='client.34349 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='client.34361 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mgr 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all mon 2026-03-09T00:19:09.837 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: Upgrade: Setting container_image for all crash 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: Upgrade: osd.3 is safe to restart 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='client.34349 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='client.34361 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mgr 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all mon 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: Upgrade: Setting container_image for all crash 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T00:19:09.837 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: Upgrade: osd.3 is safe to restart 2026-03-09T00:19:09.838 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:09.838 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T00:19:09.838 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:09 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='client.34349 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='client.34361 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 
ceph-mon[82076]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mgr 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all mon 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: Upgrade: Setting container_image for all crash 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: Upgrade: osd.3 is safe to restart 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T00:19:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:09 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:10.101 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:09 vm04 systemd[1]: Stopping Ceph osd.3 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:19:10.101 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[62355]: 2026-03-09T00:19:09.911+0000 7f1cd210e700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:19:10.101 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[62355]: 2026-03-09T00:19:09.911+0000 7f1cd210e700 -1 osd.3 112 *** Got signal Terminated *** 2026-03-09T00:19:10.101 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:09 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[62355]: 2026-03-09T00:19:09.911+0000 7f1cd210e700 -1 osd.3 112 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:19:11.042 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:10 vm04 ceph-mon[96438]: Upgrade: Updating osd.3 2026-03-09T00:19:11.043 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:10 vm04 ceph-mon[96438]: Deploying daemon osd.3 on vm04 2026-03-09T00:19:11.043 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:10 vm04 ceph-mon[96438]: osd.3 marked itself down and dead 2026-03-09T00:19:11.043 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:10 vm04 podman[116651]: 2026-03-09 00:19:10.836641813 +0000 UTC m=+0.941248492 container died d530f6e786d9cb4669e39e4099af235a590716b58345b63825fe4fd0b5606336 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_BRANCH=HEAD, GIT_CLEAN=True, ceph=True, vcs-type=git, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, maintainer=Guillaume Abrioux , CEPH_POINT_RELEASE=-17.2.0, com.redhat.component=centos-stream-container, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, io.buildah.version=1.19.8, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vendor=Red Hat, Inc., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, release=754) 2026-03-09T00:19:11.043 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:10 vm04 podman[116651]: 2026-03-09 00:19:10.863735616 +0000 UTC m=+0.968342295 container remove d530f6e786d9cb4669e39e4099af235a590716b58345b63825fe4fd0b5606336 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.display-name=CentOS Stream 8, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, name=centos-stream, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vendor=Red Hat, Inc., ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, distribution-scope=public, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container) 2026-03-09T00:19:11.043 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:10 vm04 bash[116651]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3 2026-03-09T00:19:11.043 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:11.004220411 +0000 UTC m=+0.016983634 container create 9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T00:19:11.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:10 vm04 ceph-mon[94619]: Upgrade: Updating osd.3 2026-03-09T00:19:11.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:10 vm04 ceph-mon[94619]: Deploying daemon osd.3 on vm04 2026-03-09T00:19:11.043 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:10 vm04 ceph-mon[94619]: osd.3 marked itself down and dead 2026-03-09T00:19:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:10 vm10 ceph-mon[82076]: Upgrade: Updating osd.3 2026-03-09T00:19:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:10 vm10 ceph-mon[82076]: Deploying daemon osd.3 on vm04 2026-03-09T00:19:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:10 vm10 ceph-mon[82076]: osd.3 marked itself down and dead 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:11.042275918 +0000 UTC m=+0.055039141 container init 9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223) 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:11.046721277 +0000 UTC m=+0.059484500 container start 9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, 
name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default) 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:11.050194107 +0000 UTC m=+0.062957330 container attach 9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0) 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:10.997516566 +0000 UTC m=+0.010279798 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 conmon[116731]: conmon 9c90fed643268809c6af : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641.scope/container/memory.events 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:11.182500627 +0000 UTC m=+0.195263850 container died 9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116720]: 2026-03-09 00:19:11.199348926 +0000 UTC m=+0.212112149 
container remove 9c90fed643268809c6af499c8a8d6898aa29f6b5528d2158569e1ee61a68a641 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3.service: Deactivated successfully. 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3.service: Unit process 116731 (conmon) remains running after unit stopped. 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3.service: Unit process 116739 (podman) remains running after unit stopped. 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 systemd[1]: Stopped Ceph osd.3 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:19:11.306 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3.service: Consumed 34.874s CPU time, 225.1M memory peak. 2026-03-09T00:19:11.793 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:11 vm04 ceph-mon[94619]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 systemd[1]: Starting Ceph osd.3 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116820]: 2026-03-09 00:19:11.517419837 +0000 UTC m=+0.020157614 container create 4d80e1d3e35603dbcfcd7a784dc803e7c0ab28b5d740aa264a9bdd35197dd95b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.vendor=CentOS) 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116820]: 2026-03-09 00:19:11.563146701 +0000 UTC m=+0.065884488 container init 4d80e1d3e35603dbcfcd7a784dc803e7c0ab28b5d740aa264a9bdd35197dd95b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid) 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116820]: 2026-03-09 00:19:11.566688178 +0000 UTC m=+0.069425955 container start 4d80e1d3e35603dbcfcd7a784dc803e7c0ab28b5d740aa264a9bdd35197dd95b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116820]: 2026-03-09 00:19:11.572215562 +0000 UTC m=+0.074953349 container attach 4d80e1d3e35603dbcfcd7a784dc803e7c0ab28b5d740aa264a9bdd35197dd95b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 podman[116820]: 2026-03-09 00:19:11.509562073 +0000 UTC m=+0.012299850 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 bash[116820]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:11.793 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:11 vm04 bash[116820]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:11 vm10 ceph-mon[82076]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:11 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:11 vm10 ceph-mon[82076]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T00:19:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:11 vm04 ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:11 vm04 ceph-mon[94619]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T00:19:12.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:11 vm04 ceph-mon[96438]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:12.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:11 vm04 ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:12.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:11 vm04 ceph-mon[96438]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: --> Failed to activate via raw: did not find any matching OSD to activate 
2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7d0a8ff5-b100-40c5-bd87-232736b38d2c/osd-block-ac348a8b-4e4c-4ce9-84cd-4eafa34927bb --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-09T00:19:12.527 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-7d0a8ff5-b100-40c5-bd87-232736b38d2c/osd-block-ac348a8b-4e4c-4ce9-84cd-4eafa34927bb --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-09T00:19:12.814 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[94619]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T00:19:12.814 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:12.814 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:12.814 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:12.814 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:12.814 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/ln -snf /dev/ceph-7d0a8ff5-b100-40c5-bd87-232736b38d2c/osd-block-ac348a8b-4e4c-4ce9-84cd-4eafa34927bb /var/lib/ceph/osd/ceph-3/block 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/ln -snf /dev/ceph-7d0a8ff5-b100-40c5-bd87-232736b38d2c/osd-block-ac348a8b-4e4c-4ce9-84cd-4eafa34927bb /var/lib/ceph/osd/ceph-3/block 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: 
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T00:19:12.814 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate[116832]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[116820]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 podman[116820]: 2026-03-09 00:19:12.556690258 +0000 UTC m=+1.059428035 container died 4d80e1d3e35603dbcfcd7a784dc803e7c0ab28b5d740aa264a9bdd35197dd95b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 podman[116820]: 2026-03-09 00:19:12.584301571 +0000 UTC m=+1.087039348 container remove 4d80e1d3e35603dbcfcd7a784dc803e7c0ab28b5d740aa264a9bdd35197dd95b (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-activate, org.opencontainers.image.authors=Ceph Release Team , ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 podman[117084]: 2026-03-09 00:19:12.678419123 +0000 UTC m=+0.016926969 container create e8bef19a96a6f33d6a4fe24f57b8edb4cf37b521840304036c3c273b88092b87 
(image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223) 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 podman[117084]: 2026-03-09 00:19:12.715168015 +0000 UTC m=+0.053675861 container init e8bef19a96a6f33d6a4fe24f57b8edb4cf37b521840304036c3c273b88092b87 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 podman[117084]: 2026-03-09 00:19:12.719322069 +0000 UTC m=+0.057829915 container start e8bef19a96a6f33d6a4fe24f57b8edb4cf37b521840304036c3c273b88092b87 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0) 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 bash[117084]: e8bef19a96a6f33d6a4fe24f57b8edb4cf37b521840304036c3c273b88092b87 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 podman[117084]: 2026-03-09 00:19:12.671739012 +0000 UTC m=+0.010246858 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:12.815 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:12 vm04 systemd[1]: Started Ceph osd.3 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 
2026-03-09T00:19:13.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[96438]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T00:19:13.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:13.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:13.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:13.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:13.066 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:12 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:13.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:12 vm10 ceph-mon[82076]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T00:19:13.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:13.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:13.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:13.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:13.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:13.695 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:13 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:19:13.538+0000 7fad06e33740 -1 Falling back to public interface 2026-03-09T00:19:14.016 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:13 vm04 ceph-mon[94619]: pgmap v95: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:19:14.016 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:13 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:14.016 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:13 vm04 ceph-mon[96438]: pgmap v95: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:19:14.016 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:13 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:14.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:13 vm10 ceph-mon[82076]: pgmap v95: 161 pgs: 27 stale+active+clean, 134 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T00:19:14.078 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:13 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:14.350 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:19:14.137+0000 7fad06e33740 -1 osd.3 0 read_superblock omap replica is missing. 2026-03-09T00:19:14.351 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:14 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:19:14.155+0000 7fad06e33740 -1 osd.3 112 log_to_monitors true 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: pgmap v96: 161 pgs: 20 active+undersized, 13 stale+active+clean, 12 active+undersized+degraded, 116 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 41/723 objects degraded (5.671%) 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: from='osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:15 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 41/723 objects degraded (5.671%), 12 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: pgmap v96: 161 pgs: 20 active+undersized, 13 stale+active+clean, 12 active+undersized+degraded, 116 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 41/723 objects degraded (5.671%) 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: from='osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[94619]: Health check failed: Degraded data redundancy: 41/723 objects degraded (5.671%), 12 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: pgmap v96: 161 pgs: 20 active+undersized, 13 stale+active+clean, 12 active+undersized+degraded, 116 active+clean; 457 KiB data, 166 MiB used, 160 GiB / 160 GiB avail; 41/723 objects degraded (5.671%) 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: from='osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:15 vm04 ceph-mon[96438]: Health check failed: Degraded data redundancy: 41/723 objects degraded (5.671%), 12 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:15.352 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:19:15] "GET /metrics HTTP/1.1" 200 37756 "" "Prometheus/2.51.0" 2026-03-09T00:19:15.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:15 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:19:15.638+0000 7fe15b141640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:19:16.352 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:19:16 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:19:16.079+0000 7facfe3dd640 -1 osd.3 112 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:19:16.352 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[96438]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.352 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:16.353 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:16 vm04 ceph-mon[94619]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='osd.3 ' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='osd.3 ' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm04", "root=default"]}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:16.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:16 vm10 ceph-mon[82076]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T00:19:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:17.015Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:17.016Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[96438]: pgmap v98: 161 pgs: 43 active+undersized, 23 active+undersized+degraded, 95 active+clean; 457 KiB data, 204 MiB used, 160 GiB / 160 GiB avail; 92/723 objects degraded (12.725%) 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[96438]: OSD bench result of 32547.009870 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[96438]: osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053] boot 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[96438]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[94619]: pgmap v98: 161 pgs: 43 active+undersized, 23 active+undersized+degraded, 95 active+clean; 457 KiB data, 204 MiB used, 160 GiB / 160 GiB avail; 92/723 objects degraded (12.725%) 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[94619]: OSD bench result of 32547.009870 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[94619]: osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053] boot 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[94619]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T00:19:17.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:17 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:19:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:17 vm10 ceph-mon[82076]: pgmap v98: 161 pgs: 43 active+undersized, 23 active+undersized+degraded, 95 active+clean; 457 KiB data, 204 MiB used, 160 GiB / 160 GiB avail; 92/723 objects degraded (12.725%) 2026-03-09T00:19:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:17 vm10 ceph-mon[82076]: OSD bench result of 32547.009870 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T00:19:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:17 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:19:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:17 vm10 ceph-mon[82076]: osd.3 [v2:192.168.123.104:6826/3633915053,v1:192.168.123.104:6827/3633915053] boot 2026-03-09T00:19:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:17 vm10 ceph-mon[82076]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T00:19:18.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T00:19:19.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:19 vm04 ceph-mon[96438]: pgmap v100: 161 pgs: 11 peering, 35 active+undersized, 20 active+undersized+degraded, 95 active+clean; 457 KiB data, 205 MiB used, 160 GiB / 160 GiB avail; 85/723 objects degraded (11.757%) 2026-03-09T00:19:19.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:19 vm04 ceph-mon[96438]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T00:19:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:19 vm04 ceph-mon[94619]: pgmap v100: 161 pgs: 11 peering, 35 active+undersized, 20 active+undersized+degraded, 95 active+clean; 457 KiB data, 205 MiB used, 160 GiB / 160 GiB avail; 85/723 objects degraded (11.757%) 2026-03-09T00:19:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:19 vm04 ceph-mon[94619]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T00:19:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:19 vm10 ceph-mon[82076]: pgmap v100: 161 pgs: 11 peering, 35 active+undersized, 20 active+undersized+degraded, 95 active+clean; 457 KiB data, 205 MiB used, 160 GiB / 160 GiB avail; 85/723 objects degraded (11.757%) 2026-03-09T00:19:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:19 vm10 ceph-mon[82076]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T00:19:20.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:20 vm10 ceph-mon[82076]: Health check update: Degraded data redundancy: 41/723 objects degraded (5.671%), 10 pgs degraded 
(PG_DEGRADED) 2026-03-09T00:19:20.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:20 vm04 ceph-mon[96438]: Health check update: Degraded data redundancy: 41/723 objects degraded (5.671%), 10 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:20 vm04 ceph-mon[94619]: Health check update: Degraded data redundancy: 41/723 objects degraded (5.671%), 10 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:21 vm10 ceph-mon[82076]: pgmap v102: 161 pgs: 18 peering, 23 active+undersized, 10 active+undersized+degraded, 110 active+clean; 457 KiB data, 205 MiB used, 160 GiB / 160 GiB avail; 41/723 objects degraded (5.671%) 2026-03-09T00:19:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:21 vm04 ceph-mon[94619]: pgmap v102: 161 pgs: 18 peering, 23 active+undersized, 10 active+undersized+degraded, 110 active+clean; 457 KiB data, 205 MiB used, 160 GiB / 160 GiB avail; 41/723 objects degraded (5.671%) 2026-03-09T00:19:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:21 vm04 ceph-mon[96438]: pgmap v102: 161 pgs: 18 peering, 23 active+undersized, 10 active+undersized+degraded, 110 active+clean; 457 KiB data, 205 MiB used, 160 GiB / 160 GiB avail; 41/723 objects degraded (5.671%) 2026-03-09T00:19:22.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:22 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 41/723 objects degraded (5.671%), 10 pgs degraded) 2026-03-09T00:19:22.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:22 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:19:22.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:22 vm04 ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 41/723 objects degraded (5.671%), 10 pgs degraded) 2026-03-09T00:19:22.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:22 vm04 ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:19:22.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:22 vm04 ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 41/723 objects degraded (5.671%), 10 pgs degraded) 2026-03-09T00:19:22.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:22 vm04 ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:19:23.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:23 vm04 ceph-mon[96438]: pgmap v103: 161 pgs: 18 peering, 143 active+clean; 457 KiB data, 185 MiB used, 160 GiB / 160 GiB avail; 151 B/s rd, 0 op/s 2026-03-09T00:19:23.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:23 vm04 ceph-mon[94619]: pgmap v103: 161 pgs: 18 peering, 143 active+clean; 457 KiB data, 185 MiB used, 160 GiB / 160 GiB avail; 151 B/s rd, 0 op/s 2026-03-09T00:19:23.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:23 vm10 ceph-mon[82076]: pgmap v103: 161 pgs: 18 peering, 143 active+clean; 457 KiB data, 185 MiB used, 160 GiB / 160 GiB avail; 151 B/s rd, 0 op/s 2026-03-09T00:19:24.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:24 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:24.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:24 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:24.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:24 vm04 
ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:25.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:25 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:19:25] "GET /metrics HTTP/1.1" 200 37756 "" "Prometheus/2.51.0" 2026-03-09T00:19:25.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:25 vm04 ceph-mon[96438]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-09T00:19:25.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:25 vm04 ceph-mon[94619]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-09T00:19:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:25 vm10 ceph-mon[82076]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-09T00:19:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:27.015Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:27 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:27.016Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:27 vm04 ceph-mon[94619]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 697 B/s rd, 0 op/s 2026-03-09T00:19:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:27 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:27 vm04 ceph-mon[96438]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 697 B/s rd, 0 op/s 2026-03-09T00:19:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:27 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:27 vm10 ceph-mon[82076]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 697 B/s rd, 0 op/s 2026-03-09T00:19:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:27 vm10 ceph-mon[82076]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:28.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:28 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:28.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:28 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:28.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:28 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:29 vm10 ceph-mon[82076]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T00:19:29.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:29 vm04 ceph-mon[94619]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T00:19:29.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:29 vm04 ceph-mon[96438]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 520 B/s rd, 0 op/s 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: Upgrade: osd.4 is safe to restart 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T00:19:31.442 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:31 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:31.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 520 B/s rd, 0 op/s 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: Upgrade: osd.4 is safe to restart 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 520 B/s rd, 0 op/s 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: Upgrade: osd.4 is safe to restart 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T00:19:31.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:31 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:31.717 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:31 vm10 systemd[1]: Stopping Ceph osd.4 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:19:32.078 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[52482]: 2026-03-09T00:19:31.790+0000 7f9dcea80700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:19:32.078 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[52482]: 2026-03-09T00:19:31.790+0000 7f9dcea80700 -1 osd.4 117 *** Got signal Terminated *** 2026-03-09T00:19:32.078 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:31 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[52482]: 2026-03-09T00:19:31.790+0000 7f9dcea80700 -1 osd.4 117 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:19:32.511 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:32 vm10 ceph-mon[82076]: Upgrade: Updating osd.4 2026-03-09T00:19:32.511 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:32 vm10 ceph-mon[82076]: Deploying daemon osd.4 on vm10 2026-03-09T00:19:32.511 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:32 vm10 ceph-mon[82076]: osd.4 marked itself down and dead 2026-03-09T00:19:32.515 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90206]: 2026-03-09 00:19:32.314434904 +0000 UTC m=+0.538313287 container died ad302e6f363cc488aad1d4ce5b6f5ddd703a5f33099edd8e33d1eff0262cc8af (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, name=centos-stream, version=8, io.openshift.expose-services=, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, architecture=x86_64, GIT_CLEAN=True, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.openshift.tags=base centos centos-stream, ceph=True, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.license_terms=https://centos.org/legal/licensing-policy/) 2026-03-09T00:19:32.515 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90206]: 2026-03-09 00:19:32.341609255 +0000 UTC m=+0.565487628 container remove ad302e6f363cc488aad1d4ce5b6f5ddd703a5f33099edd8e33d1eff0262cc8af (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, com.redhat.component=centos-stream-container, GIT_BRANCH=HEAD, vendor=Red Hat, Inc., release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, maintainer=Guillaume Abrioux , url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, io.buildah.version=1.19.8, io.k8s.display-name=CentOS Stream 8, name=centos-stream, distribution-scope=public, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vcs-type=git, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_CLEAN=True, version=8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, architecture=x86_64) 2026-03-09T00:19:32.515 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 bash[90206]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4 2026-03-09T00:19:32.515 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.478308177 +0000 UTC m=+0.016421907 container create 16e38127ed7a9e711d8a5d69c0d6f6cd2baee4b3c88be59cfa0ea6f5fceebb89 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:19:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:32 vm04 ceph-mon[94619]: Upgrade: Updating osd.4 2026-03-09T00:19:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:32 vm04 ceph-mon[94619]: Deploying daemon osd.4 on vm10 2026-03-09T00:19:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:32 vm04 ceph-mon[94619]: osd.4 marked itself down and dead 2026-03-09T00:19:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:32 vm04 ceph-mon[96438]: Upgrade: Updating osd.4 2026-03-09T00:19:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:32 vm04 ceph-mon[96438]: Deploying daemon osd.4 on vm10 2026-03-09T00:19:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:32 vm04 ceph-mon[96438]: osd.4 marked itself down and dead 2026-03-09T00:19:32.828 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.521615052 +0000 UTC m=+0.059728793 container init 16e38127ed7a9e711d8a5d69c0d6f6cd2baee4b3c88be59cfa0ea6f5fceebb89 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2) 2026-03-09T00:19:32.828 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.524842745 +0000 UTC m=+0.062956475 container start 
16e38127ed7a9e711d8a5d69c0d6f6cd2baee4b3c88be59cfa0ea6f5fceebb89 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:19:32.828 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.53200012 +0000 UTC m=+0.070113850 container attach 16e38127ed7a9e711d8a5d69c0d6f6cd2baee4b3c88be59cfa0ea6f5fceebb89 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:19:32.829 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.471371625 +0000 UTC m=+0.009485364 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:32.829 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.666307206 +0000 UTC m=+0.204420936 container died 16e38127ed7a9e711d8a5d69c0d6f6cd2baee4b3c88be59cfa0ea6f5fceebb89 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:19:32.829 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90275]: 2026-03-09 00:19:32.68413347 +0000 UTC m=+0.222247200 container remove 16e38127ed7a9e711d8a5d69c0d6f6cd2baee4b3c88be59cfa0ea6f5fceebb89 
(image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223) 2026-03-09T00:19:32.829 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.4.service: Deactivated successfully. 2026-03-09T00:19:32.829 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 systemd[1]: Stopped Ceph osd.4 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:19:32.829 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.4.service: Consumed 29.012s CPU time. 2026-03-09T00:19:33.142 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 systemd[1]: Starting Ceph osd.4 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:19:33.143 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:32 vm10 podman[90377]: 2026-03-09 00:19:32.982161626 +0000 UTC m=+0.018836275 container create ead893f34fb14bb1e1ac12c5f2aa07a7db4b1737c30ee2ba523ca15b75a0ded2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:19:33.143 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 podman[90377]: 2026-03-09 00:19:33.056348719 +0000 UTC m=+0.093023388 container init ead893f34fb14bb1e1ac12c5f2aa07a7db4b1737c30ee2ba523ca15b75a0ded2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid) 
2026-03-09T00:19:33.143 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 podman[90377]: 2026-03-09 00:19:33.059330829 +0000 UTC m=+0.096005489 container start ead893f34fb14bb1e1ac12c5f2aa07a7db4b1737c30ee2ba523ca15b75a0ded2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223) 2026-03-09T00:19:33.143 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 podman[90377]: 2026-03-09 00:19:33.060856906 +0000 UTC m=+0.097531566 container attach ead893f34fb14bb1e1ac12c5f2aa07a7db4b1737c30ee2ba523ca15b75a0ded2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:19:33.143 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 podman[90377]: 2026-03-09 00:19:32.974400861 +0000 UTC m=+0.011075510 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:33.143 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:33 vm10 ceph-mon[82076]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T00:19:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:33 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:33.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:33 vm10 ceph-mon[82076]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T00:19:33.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: 
Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:33 vm04 ceph-mon[94619]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T00:19:33.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:33 vm04 ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:33.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:33 vm04 ceph-mon[94619]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T00:19:33.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:33 vm04 ceph-mon[96438]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 938 B/s rd, 0 op/s 2026-03-09T00:19:33.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:33 vm04 ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:33.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:33 vm04 ceph-mon[96438]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T00:19:33.981 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:19:33.981 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.981 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:19:33.981 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.981 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.982 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:33.982 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T00:19:33.982 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T00:19:33.982 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8a15fc70-3ecd-4b1c-9dc6-d18a29f18415/osd-block-ffd541f9-68f9-454d-acfc-1323f62f60a0 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T00:19:33.982 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8a15fc70-3ecd-4b1c-9dc6-d18a29f18415/osd-block-ffd541f9-68f9-454d-acfc-1323f62f60a0 --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T00:19:33.982 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/ln -snf /dev/ceph-8a15fc70-3ecd-4b1c-9dc6-d18a29f18415/osd-block-ffd541f9-68f9-454d-acfc-1323f62f60a0 /var/lib/ceph/osd/ceph-4/block 
2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/ln -snf /dev/ceph-8a15fc70-3ecd-4b1c-9dc6-d18a29f18415/osd-block-ffd541f9-68f9-454d-acfc-1323f62f60a0 /var/lib/ceph/osd/ceph-4/block 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate[90389]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:33 vm10 bash[90377]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 podman[90377]: 2026-03-09 00:19:34.011742042 +0000 UTC m=+1.048416702 container died ead893f34fb14bb1e1ac12c5f2aa07a7db4b1737c30ee2ba523ca15b75a0ded2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate, CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 podman[90377]: 2026-03-09 00:19:34.046636713 +0000 UTC m=+1.083311373 container remove ead893f34fb14bb1e1ac12c5f2aa07a7db4b1737c30ee2ba523ca15b75a0ded2 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-activate, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base 
Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 podman[90621]: 2026-03-09 00:19:34.177448617 +0000 UTC m=+0.020563268 container create 5cd5f044c1899600c6509a5b85aaaff70d8a83c23b89bbef2eca673eb5dad100 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 podman[90621]: 2026-03-09 00:19:34.208762125 +0000 UTC m=+0.051876787 container init 5cd5f044c1899600c6509a5b85aaaff70d8a83c23b89bbef2eca673eb5dad100 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 podman[90621]: 2026-03-09 00:19:34.212188798 +0000 UTC m=+0.055303449 container start 5cd5f044c1899600c6509a5b85aaaff70d8a83c23b89bbef2eca673eb5dad100 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3) 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 bash[90621]: 5cd5f044c1899600c6509a5b85aaaff70d8a83c23b89bbef2eca673eb5dad100 2026-03-09T00:19:34.274 
INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 podman[90621]: 2026-03-09 00:19:34.167842417 +0000 UTC m=+0.010957068 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:34.274 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 systemd[1]: Started Ceph osd.4 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:19:34.502 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:34 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:34.148Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:34.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:34 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:34.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:34 vm04 ceph-mon[96438]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T00:19:34.502 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:34 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:34.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:34 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:34.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:34 vm04 ceph-mon[94619]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T00:19:34.502 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:34 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:34 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:34 vm10 ceph-mon[82076]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T00:19:34.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:34 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:34.924 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:34 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:19:34.620+0000 7fc6e38dc740 -1 Falling back to public interface 2026-03-09T00:19:35.296 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:35 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:19:35] "GET /metrics HTTP/1.1" 200 37762 "" "Prometheus/2.51.0" 2026-03-09T00:19:35.492 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:35 vm10 ceph-mon[82076]: pgmap v111: 161 pgs: 32 peering, 14 stale+active+clean, 115 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T00:19:35.492 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:35 vm10 ceph-mon[82076]: Health check failed: Reduced data availability: 6 pgs inactive, 9 pgs peering (PG_AVAILABILITY) 2026-03-09T00:19:35.492 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:35 vm10 ceph-mon[82076]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:35.492 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:35 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[96438]: pgmap v111: 161 pgs: 32 peering, 14 stale+active+clean, 115 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[96438]: Health check failed: Reduced data availability: 6 pgs inactive, 9 pgs peering (PG_AVAILABILITY) 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[94619]: pgmap v111: 161 pgs: 32 peering, 14 stale+active+clean, 115 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[94619]: Health check failed: Reduced data availability: 6 pgs inactive, 9 pgs peering (PG_AVAILABILITY) 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:35.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:35 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:35.828 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:35 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:19:35.489+0000 7fc6e38dc740 -1 osd.4 0 read_superblock omap replica is missing. 
2026-03-09T00:19:35.828 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:35 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:19:35.514+0000 7fc6e38dc740 -1 osd.4 117 log_to_monitors true 2026-03-09T00:19:36.361 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:36 vm10 ceph-mon[82076]: from='osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T00:19:36.362 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:36 vm10 ceph-mon[82076]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T00:19:36.362 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:36 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.362 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:36 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.362 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:36 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.362 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:36 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[96438]: from='osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[96438]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[94619]: from='osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[94619]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:36.601 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:36 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:37.312 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:37.016Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:37.312 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:37.017Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:37.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:19:37 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:19:37.429+0000 7fc6dae86640 -1 osd.4 117 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:19:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:37 vm10 ceph-mon[82076]: pgmap v112: 161 pgs: 23 active+undersized, 32 peering, 13 active+undersized+degraded, 93 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 60/723 objects degraded (8.299%) 2026-03-09T00:19:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:37 vm10 ceph-mon[82076]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T00:19:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:37 vm10 ceph-mon[82076]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T00:19:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:37 vm10 ceph-mon[82076]: from='osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:37 vm10 ceph-mon[82076]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:37 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 60/723 objects degraded (8.299%), 13 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:37 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:19:37.460+0000 7fe15b141640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[96438]: pgmap v112: 161 pgs: 23 active+undersized, 32 peering, 13 active+undersized+degraded, 93 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 60/723 objects degraded (8.299%) 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:37 vm04 
ceph-mon[96438]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[96438]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[96438]: from='osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[96438]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[96438]: Health check failed: Degraded data redundancy: 60/723 objects degraded (8.299%), 13 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[94619]: pgmap v112: 161 pgs: 23 active+undersized, 32 peering, 13 active+undersized+degraded, 93 active+clean; 457 KiB data, 189 MiB used, 160 GiB / 160 GiB avail; 60/723 objects degraded (8.299%) 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[94619]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[94619]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[94619]: from='osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[94619]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:37.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:37 vm04 ceph-mon[94619]: Health check failed: Degraded data redundancy: 60/723 objects degraded (8.299%), 13 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:38.583 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:38.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:38 vm10 ceph-mon[82076]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[94619]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:38.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:38 vm04 ceph-mon[96438]: Upgrade: unsafe to stop osd(s) at this time (13 PGs are or would become offline) 2026-03-09T00:19:39.010 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (16m) 24s ago 23m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (3m) 3s ago 22m 79.4M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (4m) 24s ago 22m 51.9M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (4m) 3s ago 24m 489M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (13m) 24s ago 25m 557M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (3m) 24s ago 25m 53.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (3m) 3s ago 25m 48.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (2m) 24s ago 25m 41.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (16m) 24s ago 23m 9.78M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (16m) 3s ago 23m 10.2M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (2m) 24s ago 24m 70.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (117s) 24s ago 24m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (62s) 24s ago 24m 45.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (26s) 24s ago 24m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (4s) 3s ago 24m 15.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (23m) 3s ago 23m 58.6M 4096M 17.2.0 e1d6a67b021e 1bef86fdb303 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (23m) 3s ago 23m 55.7M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (23m) 3s ago 23m 60.7M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (4m) 3s ago 23m 51.9M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (22m) 24s ago 22m 
100M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:19:39.011 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (22m) 3s ago 22m 98.4M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3, 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 10 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:19:39.251 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:19:39.480 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:19:39.480 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "5/8 daemons upgraded", 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:19:39.481 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: OSD bench result of 17479.646176 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.4. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: pgmap v114: 161 pgs: 30 active+undersized, 19 peering, 19 active+undersized+degraded, 93 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 96/723 objects degraded (13.278%) 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156] boot 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: from='client.34376 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: from='client.54337 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:39.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:39 vm10 ceph-mon[82076]: from='client.? 192.168.123.104:0/893026592' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: OSD bench result of 17479.646176 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.4. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: pgmap v114: 161 pgs: 30 active+undersized, 19 peering, 19 active+undersized+degraded, 93 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 96/723 objects degraded (13.278%) 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156] boot 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: from='client.34376 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: from='client.54337 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[96438]: from='client.? 192.168.123.104:0/893026592' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: OSD bench result of 17479.646176 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.4. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: pgmap v114: 161 pgs: 30 active+undersized, 19 peering, 19 active+undersized+degraded, 93 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 96/723 objects degraded (13.278%) 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: osd.4 [v2:192.168.123.110:6800/3769924156,v1:192.168.123.110:6801/3769924156] boot 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: from='client.34376 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: from='client.54337 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:39.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:39 vm04 ceph-mon[94619]: from='client.? 192.168.123.104:0/893026592' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:40.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:40 vm10 ceph-mon[82076]: from='client.54343 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:40.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:40 vm10 ceph-mon[82076]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T00:19:40.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:40 vm10 ceph-mon[82076]: from='client.54352 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:40.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:40 vm04 ceph-mon[96438]: from='client.54343 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:40.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:40 vm04 ceph-mon[96438]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T00:19:40.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:40 vm04 ceph-mon[96438]: from='client.54352 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:40.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:40 vm04 ceph-mon[94619]: from='client.54343 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:40.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:40 vm04 ceph-mon[94619]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T00:19:40.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:40 vm04 ceph-mon[94619]: from='client.54352 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:19:41.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:41 vm10 ceph-mon[82076]: pgmap v117: 161 pgs: 7 peering, 33 active+undersized, 21 active+undersized+degraded, 100 active+clean; 457 KiB data, 207 MiB used, 
160 GiB / 160 GiB avail; 95/723 objects degraded (13.140%) 2026-03-09T00:19:41.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:41 vm10 ceph-mon[82076]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 6 pgs inactive, 8 pgs peering) 2026-03-09T00:19:41.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:41 vm04 ceph-mon[96438]: pgmap v117: 161 pgs: 7 peering, 33 active+undersized, 21 active+undersized+degraded, 100 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 95/723 objects degraded (13.140%) 2026-03-09T00:19:41.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:41 vm04 ceph-mon[96438]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 6 pgs inactive, 8 pgs peering) 2026-03-09T00:19:41.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:41 vm04 ceph-mon[94619]: pgmap v117: 161 pgs: 7 peering, 33 active+undersized, 21 active+undersized+degraded, 100 active+clean; 457 KiB data, 207 MiB used, 160 GiB / 160 GiB avail; 95/723 objects degraded (13.140%) 2026-03-09T00:19:41.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:41 vm04 ceph-mon[94619]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 6 pgs inactive, 8 pgs peering) 2026-03-09T00:19:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:42 vm10 ceph-mon[82076]: pgmap v118: 161 pgs: 7 peering, 14 active+undersized, 9 active+undersized+degraded, 131 active+clean; 457 KiB data, 607 MiB used, 159 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 54/723 objects degraded (7.469%); 0 B/s, 0 objects/s recovering 2026-03-09T00:19:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:42 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:42 vm10 ceph-mon[82076]: Health check update: Degraded data redundancy: 54/723 objects degraded (7.469%), 9 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[96438]: pgmap v118: 161 pgs: 7 peering, 14 active+undersized, 9 active+undersized+degraded, 131 active+clean; 457 KiB data, 607 MiB used, 159 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 54/723 objects degraded (7.469%); 0 B/s, 0 objects/s recovering 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[96438]: Health check update: Degraded data redundancy: 54/723 objects degraded (7.469%), 9 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[94619]: pgmap v118: 161 pgs: 7 peering, 14 active+undersized, 9 active+undersized+degraded, 131 active+clean; 457 KiB data, 607 MiB used, 159 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 54/723 objects degraded (7.469%); 0 B/s, 0 objects/s recovering 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:42 vm04 
ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:43.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:42 vm04 ceph-mon[94619]: Health check update: Degraded data redundancy: 54/723 objects degraded (7.469%), 9 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:44.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:43 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:44.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:43 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:44.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:44 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:44.149Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:44.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:43 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:44 vm10 ceph-mon[82076]: pgmap v119: 161 pgs: 7 peering, 154 active+clean; 457 KiB data, 608 MiB used, 159 GiB / 160 GiB avail; 673 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:44 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 54/723 objects degraded (7.469%), 9 pgs degraded) 2026-03-09T00:19:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:44 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:45 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:19:45] "GET /metrics HTTP/1.1" 200 37776 "" "Prometheus/2.51.0" 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:44 vm04 ceph-mon[96438]: pgmap v119: 161 pgs: 7 peering, 154 active+clean; 457 KiB data, 608 MiB used, 159 GiB / 160 GiB avail; 673 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:44 vm04 ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 54/723 objects degraded (7.469%), 9 pgs degraded) 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:44 vm04 ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:44 vm04 ceph-mon[94619]: pgmap v119: 161 pgs: 7 peering, 154 active+clean; 457 KiB data, 608 MiB used, 159 GiB / 160 GiB avail; 673 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:44 vm04 ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 
54/723 objects degraded (7.469%), 9 pgs degraded) 2026-03-09T00:19:45.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:44 vm04 ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:19:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:46 vm10 ceph-mon[82076]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 608 MiB used, 159 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:47.017Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:47 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:47.018Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:47.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:46 vm04 ceph-mon[96438]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 608 MiB used, 159 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:47.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:46 vm04 ceph-mon[94619]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 608 MiB used, 159 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:48.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:48 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:48.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:48 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:48.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:48 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:49.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:49 vm10 ceph-mon[82076]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:49.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:49 vm04 ceph-mon[96438]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:49.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:49 vm04 ceph-mon[94619]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:51.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:51 vm10 ceph-mon[82076]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 975 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:51.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:51 vm04 ceph-mon[96438]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 
160 GiB avail; 975 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:51.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:51 vm04 ceph-mon[94619]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 975 B/s rd, 0 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: Upgrade: osd.5 is safe to restart 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: Upgrade: Updating osd.5 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:53.500 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:53 vm10 ceph-mon[82076]: Deploying daemon osd.5 on vm10 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: Upgrade: osd.5 is safe to restart 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: Upgrade: Updating osd.5 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[96438]: Deploying daemon osd.5 on vm10 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s; 0 B/s, 0 objects/s recovering 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T00:19:53.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: Upgrade: osd.5 is safe to restart 2026-03-09T00:19:53.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: Upgrade: Updating osd.5 2026-03-09T00:19:53.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:53.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T00:19:53.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:53.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:53 vm04 ceph-mon[94619]: Deploying daemon osd.5 on vm10 2026-03-09T00:19:53.828 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:53 vm10 systemd[1]: Stopping Ceph osd.5 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:19:53.828 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[55265]: 2026-03-09T00:19:53.621+0000 7ff58ef1e700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:19:53.828 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[55265]: 2026-03-09T00:19:53.621+0000 7ff58ef1e700 -1 osd.5 122 *** Got signal Terminated *** 2026-03-09T00:19:53.828 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:53 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[55265]: 2026-03-09T00:19:53.621+0000 7ff58ef1e700 -1 osd.5 122 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:19:54.568 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:54 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:54.568 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:54 vm10 ceph-mon[82076]: osd.5 marked itself down and dead 2026-03-09T00:19:54.568 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94335]: 2026-03-09 00:19:54.376071044 +0000 UTC m=+0.770986561 container died 1bef86fdb3031cd909869f4f8b4346fe1429b52044a5fc74fbc9e8f75cb1faee (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, name=centos-stream, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8, RELEASE=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, maintainer=Guillaume Abrioux , vcs-type=git, architecture=x86_64, com.redhat.component=centos-stream-container, distribution-scope=public, CEPH_POINT_RELEASE=-17.2.0, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, vendor=Red Hat, Inc., version=8) 2026-03-09T00:19:54.568 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94335]: 2026-03-09 00:19:54.394167595 +0000 UTC m=+0.789083101 container remove 1bef86fdb3031cd909869f4f8b4346fe1429b52044a5fc74fbc9e8f75cb1faee (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, name=centos-stream, release=754, io.openshift.expose-services=, ceph=True, io.k8s.display-name=CentOS Stream 8, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, version=8, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , distribution-scope=public, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, io.openshift.tags=base centos centos-stream, RELEASE=HEAD) 2026-03-09T00:19:54.568 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 bash[94335]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5 2026-03-09T00:19:54.568 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94403]: 2026-03-09 00:19:54.54411808 +0000 UTC m=+0.019000162 container create bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T00:19:54.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:54 vm04 ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:54.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:54 vm04 ceph-mon[96438]: osd.5 marked itself down and dead 
2026-03-09T00:19:54.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:54 vm04 ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:19:54.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:54 vm04 ceph-mon[94619]: osd.5 marked itself down and dead 2026-03-09T00:19:54.829 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94403]: 2026-03-09 00:19:54.581029134 +0000 UTC m=+0.055911227 container init bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94403]: 2026-03-09 00:19:54.587445002 +0000 UTC m=+0.062327095 container start bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3) 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94403]: 2026-03-09 00:19:54.588580979 +0000 UTC m=+0.063463072 container attach bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3) 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 
vm10 podman[94403]: 2026-03-09 00:19:54.53658464 +0000 UTC m=+0.011466733 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 conmon[94414]: conmon bb20947a3825a71d2f01 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa.scope/container/memory.events 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94403]: 2026-03-09 00:19:54.716031331 +0000 UTC m=+0.190913424 container died bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.build-date=20260223, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 podman[94403]: 2026-03-09 00:19:54.732574835 +0000 UTC m=+0.207456928 container remove bb20947a3825a71d2f01ea2e3398a1466e4c772da3984e3f714ced9537d047aa (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.5.service: Deactivated successfully. 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.5.service: Unit process 94414 (conmon) remains running after unit stopped. 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 systemd[1]: Stopped Ceph osd.5 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:19:54.830 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.5.service: Consumed 32.979s CPU time, 207.9M memory peak. 
2026-03-09T00:19:55.309 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:55 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:19:55] "GET /metrics HTTP/1.1" 200 37776 "" "Prometheus/2.51.0" 2026-03-09T00:19:55.312 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:55 vm10 ceph-mon[82076]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:55.312 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:55 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:55.312 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:55 vm10 ceph-mon[82076]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T00:19:55.312 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:54 vm10 systemd[1]: Starting Ceph osd.5 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:19:55.312 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 podman[94503]: 2026-03-09 00:19:55.020629904 +0000 UTC m=+0.019479870 container create eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:19:55.312 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 podman[94503]: 2026-03-09 00:19:55.063250395 +0000 UTC m=+0.062100370 container init eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:19:55.312 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 podman[94503]: 2026-03-09 00:19:55.066727122 +0000 UTC m=+0.065577077 container start eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:19:55.313 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 podman[94503]: 2026-03-09 00:19:55.070604008 +0000 UTC m=+0.069453974 container attach eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:19:55.313 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 podman[94503]: 2026-03-09 00:19:55.012927418 +0000 UTC m=+0.011777384 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:55.313 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:55.313 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:55.313 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:55.313 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:55.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:55 vm04 ceph-mon[96438]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:55.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:55 vm04 ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:55.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:55 vm04 ceph-mon[96438]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T00:19:55.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:55 vm04 ceph-mon[94619]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:19:55.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:55 vm04 ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:19:55.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:55 vm04 ceph-mon[94619]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T00:19:56.005 
INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bf9f7cc5-c821-43cf-8b9c-aae28097bea3/osd-block-5efb3808-0928-47a5-97bc-ecad3a99a5e9 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-09T00:19:56.005 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:55 vm10 bash[94503]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bf9f7cc5-c821-43cf-8b9c-aae28097bea3/osd-block-5efb3808-0928-47a5-97bc-ecad3a99a5e9 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-09T00:19:56.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:56 vm10 ceph-mon[82076]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T00:19:56.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:56 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:56.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:56 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:56.327 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:56 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/ln -snf /dev/ceph-bf9f7cc5-c821-43cf-8b9c-aae28097bea3/osd-block-5efb3808-0928-47a5-97bc-ecad3a99a5e9 /var/lib/ceph/osd/ceph-5/block 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 bash[94503]: Running command: /usr/bin/ln -snf /dev/ceph-bf9f7cc5-c821-43cf-8b9c-aae28097bea3/osd-block-5efb3808-0928-47a5-97bc-ecad3a99a5e9 /var/lib/ceph/osd/ceph-5/block 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 bash[94503]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 bash[94503]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 bash[94503]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate[94514]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 bash[94503]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 conmon[94514]: conmon eae8c3d846f4d80fda0d : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9.scope/container/memory.events 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 podman[94503]: 2026-03-09 00:19:56.041746407 +0000 UTC m=+1.040596373 container died eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 podman[94503]: 2026-03-09 00:19:56.063708864 +0000 UTC m=+1.062558830 container remove eae8c3d846f4d80fda0d0df34a067f548639ed3b1a3a434917aeaa4b661a34e9 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-activate, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , 
FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223) 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 podman[94762]: 2026-03-09 00:19:56.160475648 +0000 UTC m=+0.016480667 container create 50d8ee7c8cb6e3485ffeb3b57f487affbf0f0691ccaafce019c71914a99c7492 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3) 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 podman[94762]: 2026-03-09 00:19:56.200835819 +0000 UTC m=+0.056840848 container init 50d8ee7c8cb6e3485ffeb3b57f487affbf0f0691ccaafce019c71914a99c7492 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 podman[94762]: 2026-03-09 00:19:56.203487882 +0000 UTC m=+0.059492901 container start 50d8ee7c8cb6e3485ffeb3b57f487affbf0f0691ccaafce019c71914a99c7492 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 bash[94762]: 50d8ee7c8cb6e3485ffeb3b57f487affbf0f0691ccaafce019c71914a99c7492 2026-03-09T00:19:56.327 
INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 podman[94762]: 2026-03-09 00:19:56.154035775 +0000 UTC m=+0.010040794 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:19:56.327 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:56 vm10 systemd[1]: Started Ceph osd.5 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[96438]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[94619]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:56.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:56 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:57.198 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:57 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:19:57.032+0000 7f91f5083740 -1 Falling back to public interface 2026-03-09T00:19:57.325 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:57.018Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:57.325 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:19:57 vm04 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:19:57.019Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:19:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:57 vm10 ceph-mon[82076]: pgmap v127: 161 pgs: 21 peering, 14 stale+active+clean, 126 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:19:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:57 vm10 ceph-mon[82076]: Health check failed: Reduced data availability: 2 pgs inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-09T00:19:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:57 vm10 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:57.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:57 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[96438]: pgmap v127: 161 pgs: 21 peering, 14 stale+active+clean, 126 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[96438]: Health check failed: Reduced data availability: 2 pgs inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[94619]: pgmap v127: 161 pgs: 21 peering, 14 stale+active+clean, 126 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[94619]: Health check failed: Reduced data availability: 2 pgs inactive, 5 pgs peering (PG_AVAILABILITY) 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:57.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:57 vm04 ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:19:58.078 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:57 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:19:57.650+0000 7f91f5083740 -1 osd.5 0 read_superblock omap replica is missing. 
2026-03-09T00:19:58.078 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:57 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:19:57.688+0000 7f91f5083740 -1 osd.5 122 log_to_monitors true 2026-03-09T00:19:58.483 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:58 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.740 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:58 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.740 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:58 vm10 ceph-mon[82076]: from='osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T00:19:58.740 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:58 vm10 ceph-mon[82076]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T00:19:58.740 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:58 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.740 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:58 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.740 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:19:58 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:19:58.518+0000 7f91ece2e640 -1 osd.5 122 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[96438]: from='osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[96438]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[94619]: from='osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:58 
vm04.local ceph-mon[94619]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:58.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:19:58 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:19:58.910+0000 7fe15b141640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: pgmap v128: 161 pgs: 9 active+undersized, 21 peering, 7 stale+active+clean, 8 active+undersized+degraded, 116 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 32/723 objects degraded (4.426%) 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:19:59 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: pgmap v128: 161 pgs: 9 active+undersized, 21 peering, 7 stale+active+clean, 8 active+undersized+degraded, 116 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 32/723 objects degraded (4.426%) 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: Health check failed: Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": 
"versions"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: pgmap v128: 161 pgs: 9 active+undersized, 21 peering, 7 stale+active+clean, 8 active+undersized+degraded, 116 active+clean; 457 KiB data, 208 MiB used, 160 GiB / 160 GiB avail; 32/723 objects degraded (4.426%) 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: Health check failed: Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:19:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:19:59 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687] boot 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: Health detail: HEALTH_WARN Reduced data availability: 2 pgs inactive, 5 pgs peering; Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: [WRN] PG_AVAILABILITY: Reduced data availability: 2 pgs inactive, 5 pgs peering 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 2.3 is stuck peering for 77s, current state peering, last acting [2,7] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 2.12 is stuck peering for 23m, current state peering, last acting [3,7] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 3.5 is stuck peering for 2m, current state peering, last acting [3,2] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 3.c is stuck peering for 2m, current state peering, last acting [3,6] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 5.1b is stuck peering for 2m, current state peering, last acting [0,7] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: [WRN] PG_DEGRADED: Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 2.18 is active+undersized+degraded, acting [4,7] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 
vm04.local ceph-mon[96438]: pg 2.1c is active+undersized+degraded, acting [7,2] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 3.d is active+undersized+degraded, acting [7,6] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 3.16 is active+undersized+degraded, acting [7,1] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 3.1c is active+undersized+degraded, acting [4,1] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 3.1d is active+undersized+degraded, acting [4,6] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 4.15 is active+undersized+degraded, acting [7,3] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: pg 6.1a is active+undersized+degraded, acting [4,1] 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[96438]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687] boot 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T00:20:00.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: Health detail: HEALTH_WARN Reduced data availability: 2 pgs inactive, 5 pgs peering; Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: [WRN] PG_AVAILABILITY: Reduced data availability: 2 pgs inactive, 5 pgs peering 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 2.3 is stuck peering for 77s, current state peering, last acting [2,7] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 2.12 is stuck peering for 23m, current state peering, last acting [3,7] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 3.5 is stuck peering for 2m, current state peering, last acting [3,2] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 3.c is stuck peering for 2m, current state peering, last acting [3,6] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local 
ceph-mon[94619]: pg 5.1b is stuck peering for 2m, current state peering, last acting [0,7] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: [WRN] PG_DEGRADED: Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 2.18 is active+undersized+degraded, acting [4,7] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 2.1c is active+undersized+degraded, acting [7,2] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 3.d is active+undersized+degraded, acting [7,6] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 3.16 is active+undersized+degraded, acting [7,1] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 3.1c is active+undersized+degraded, acting [4,1] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 3.1d is active+undersized+degraded, acting [4,6] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 4.15 is active+undersized+degraded, acting [7,3] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: pg 6.1a is active+undersized+degraded, acting [4,1] 2026-03-09T00:20:00.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:00 vm04.local ceph-mon[94619]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T00:20:01.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:01.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: Upgrade: unsafe to stop osd(s) at this time (18 PGs are or would become offline) 2026-03-09T00:20:01.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:01.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: osd.5 [v2:192.168.123.110:6808/513271687,v1:192.168.123.110:6809/513271687] boot 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: Health detail: HEALTH_WARN Reduced data availability: 2 pgs inactive, 5 pgs peering; Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: [WRN] PG_AVAILABILITY: Reduced data availability: 2 pgs inactive, 5 pgs peering 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 2.3 is stuck peering for 77s, current state peering, last acting [2,7] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 2.12 is stuck peering for 23m, current state peering, last acting [3,7] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 3.5 is stuck peering for 2m, current state peering, last acting [3,2] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 3.c is stuck peering for 2m, current state peering, last acting [3,6] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 5.1b is stuck peering for 2m, current state peering, last acting [0,7] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: [WRN] PG_DEGRADED: Degraded data redundancy: 32/723 objects degraded (4.426%), 8 pgs degraded 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 2.18 is active+undersized+degraded, acting [4,7] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 2.1c is active+undersized+degraded, acting [7,2] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 3.d is active+undersized+degraded, acting [7,6] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 3.16 is active+undersized+degraded, acting [7,1] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 3.1c is active+undersized+degraded, acting [4,1] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 3.1d is active+undersized+degraded, acting [4,6] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 4.15 is active+undersized+degraded, acting [7,3] 2026-03-09T00:20:01.079 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: pg 6.1a is active+undersized+degraded, acting [4,1] 2026-03-09T00:20:01.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:00 vm10 ceph-mon[82076]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T00:20:01.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:01 vm04.local ceph-mon[96438]: pgmap v131: 161 pgs: 24 active+undersized, 21 peering, 13 active+undersized+degraded, 103 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 49/723 objects degraded (6.777%) 2026-03-09T00:20:01.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:01 vm04.local ceph-mon[94619]: pgmap v131: 161 pgs: 24 active+undersized, 21 peering, 13 active+undersized+degraded, 103 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 49/723 objects degraded (6.777%) 2026-03-09T00:20:02.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:01 vm10 ceph-mon[82076]: pgmap v131: 161 pgs: 24 active+undersized, 21 peering, 13 active+undersized+degraded, 103 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 49/723 objects degraded (6.777%) 2026-03-09T00:20:03.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:03 vm04.local ceph-mon[96438]: pgmap v133: 161 pgs: 20 active+undersized, 21 peering, 12 active+undersized+degraded, 108 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 48/723 objects degraded (6.639%) 2026-03-09T00:20:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:03 vm04.local ceph-mon[94619]: pgmap v133: 161 pgs: 20 active+undersized, 21 peering, 12 active+undersized+degraded, 108 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 48/723 objects degraded (6.639%) 2026-03-09T00:20:04.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:03 vm10 ceph-mon[82076]: pgmap v133: 161 pgs: 20 active+undersized, 21 peering, 12 active+undersized+degraded, 108 active+clean; 457 KiB data, 230 MiB used, 160 GiB / 160 GiB avail; 48/723 objects degraded (6.639%) 2026-03-09T00:20:04.622 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:04 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:04.895 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:04 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:04.895 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:04 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:05.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:05 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:20:05] "GET /metrics HTTP/1.1" 200 37847 "" "Prometheus/2.51.0" 2026-03-09T00:20:05.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:05 vm04.local ceph-mon[96438]: pgmap v134: 161 pgs: 7 active+undersized, 14 peering, 1 active+undersized+degraded, 139 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 11/723 objects degraded (1.521%) 2026-03-09T00:20:05.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:05 vm04.local ceph-mon[96438]: Health check update: Degraded data redundancy: 11/723 objects degraded (1.521%), 1 pg degraded (PG_DEGRADED) 2026-03-09T00:20:05.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 
00:20:05 vm04.local ceph-mon[96438]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 5 pgs peering) 2026-03-09T00:20:05.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:05 vm04.local ceph-mon[94619]: pgmap v134: 161 pgs: 7 active+undersized, 14 peering, 1 active+undersized+degraded, 139 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 11/723 objects degraded (1.521%) 2026-03-09T00:20:05.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:05 vm04.local ceph-mon[94619]: Health check update: Degraded data redundancy: 11/723 objects degraded (1.521%), 1 pg degraded (PG_DEGRADED) 2026-03-09T00:20:05.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:05 vm04.local ceph-mon[94619]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 5 pgs peering) 2026-03-09T00:20:06.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:05 vm10 ceph-mon[82076]: pgmap v134: 161 pgs: 7 active+undersized, 14 peering, 1 active+undersized+degraded, 139 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 11/723 objects degraded (1.521%) 2026-03-09T00:20:06.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:05 vm10 ceph-mon[82076]: Health check update: Degraded data redundancy: 11/723 objects degraded (1.521%), 1 pg degraded (PG_DEGRADED) 2026-03-09T00:20:06.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:05 vm10 ceph-mon[82076]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs inactive, 5 pgs peering) 2026-03-09T00:20:07.019 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:06 vm04.local ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 11/723 objects degraded (1.521%), 1 pg degraded) 2026-03-09T00:20:07.019 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:06 vm04.local ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:20:07.019 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:06 vm04.local ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 11/723 objects degraded (1.521%), 1 pg degraded) 2026-03-09T00:20:07.019 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:06 vm04.local ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:20:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:06 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 11/723 objects degraded (1.521%), 1 pg degraded) 2026-03-09T00:20:07.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:06 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:20:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:07.018Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:07.019Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: 
lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:08.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:07 vm10 ceph-mon[82076]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 414 B/s rd, 0 op/s 2026-03-09T00:20:08.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:07 vm04.local ceph-mon[96438]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 414 B/s rd, 0 op/s 2026-03-09T00:20:08.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:07 vm04.local ceph-mon[94619]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 414 B/s rd, 0 op/s 2026-03-09T00:20:09.703 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:20:10.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:09 vm10 ceph-mon[82076]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:20:10.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:09 vm04.local ceph-mon[94619]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:20:10.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:09 vm04.local ceph-mon[96438]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (16m) 56s ago 23m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (4m) 12s ago 23m 85.8M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (4m) 56s ago 23m 51.9M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (4m) 12s ago 25m 489M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (14m) 56s ago 26m 557M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (3m) 56s ago 26m 53.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (4m) 12s ago 25m 47.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (3m) 56s ago 25m 41.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (16m) 56s ago 23m 9.78M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (16m) 12s ago 23m 10.2M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (2m) 56s ago 25m 70.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (2m) 56s ago 25m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 
2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (93s) 56s ago 24m 45.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (57s) 56s ago 24m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (35s) 12s ago 24m 69.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (13s) 12s ago 24m 15.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (24m) 12s ago 24m 57.2M 4096M 17.2.0 e1d6a67b021e 168db5828111 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (24m) 12s ago 24m 62.0M 4096M 17.2.0 e1d6a67b021e bc6bbac15079 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (4m) 12s ago 23m 52.0M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (23m) 56s ago 23m 100M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:20:10.114 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (23m) 12s ago 23m 98.5M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:20:10.360 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:20:10.360 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:20:10.360 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4, 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 11 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:20:10.361 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: 
"target_image": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) crash,osd", 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "6/8 daemons upgraded", 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:20:10.576 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:20:10.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:10 vm04.local ceph-mon[96438]: from='client.44367 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:10.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:10 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2329800745' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:10.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:10 vm04.local ceph-mon[94619]: from='client.44367 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:10.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:10 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/2329800745' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:10 vm10 ceph-mon[82076]: from='client.44367 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:11.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:10 vm10 ceph-mon[82076]: from='client.? 
192.168.123.104:0/2329800745' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:11 vm10 ceph-mon[82076]: from='client.44370 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:11 vm10 ceph-mon[82076]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T00:20:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:11 vm10 ceph-mon[82076]: from='client.44376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:11 vm10 ceph-mon[82076]: from='client.44385 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[96438]: from='client.44370 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[96438]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T00:20:12.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[96438]: from='client.44376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[96438]: from='client.44385 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[94619]: from='client.44370 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[94619]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T00:20:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[94619]: from='client.44376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:12.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:11 vm04.local ceph-mon[94619]: from='client.44385 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:12 vm10 ceph-mon[82076]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 899 B/s rd, 0 op/s 2026-03-09T00:20:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:12 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:13.351 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[96438]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 899 B/s rd, 0 op/s 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[94619]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 899 B/s rd, 0 op/s 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:14.210 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:13 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:14.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:13 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:14.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:13 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: Upgrade: osd.6 is safe to restart 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: Upgrade: Updating osd.6 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:14.988 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:14 vm10 ceph-mon[82076]: Deploying daemon osd.6 on vm10 2026-03-09T00:20:14.988 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:14 vm10 systemd[1]: Stopping Ceph osd.6 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:20:15.329 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[58006]: 2026-03-09T00:20:15.062+0000 7f719bf50700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:20:15.329 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[58006]: 2026-03-09T00:20:15.062+0000 7f719bf50700 -1 osd.6 127 *** Got signal Terminated *** 2026-03-09T00:20:15.329 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:15 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[58006]: 2026-03-09T00:20:15.062+0000 7f719bf50700 -1 osd.6 127 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:15 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:20:15] "GET /metrics HTTP/1.1" 200 37856 "" "Prometheus/2.51.0" 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: Upgrade: osd.6 is safe to restart 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: Upgrade: Updating osd.6 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[96438]: Deploying daemon osd.6 on vm10 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: Upgrade: osd.6 is safe to restart 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: Upgrade: Updating osd.6 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:15.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:14 vm04.local ceph-mon[94619]: Deploying daemon osd.6 on vm10 2026-03-09T00:20:16.180 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:15 vm10 ceph-mon[82076]: osd.6 marked itself down and dead 2026-03-09T00:20:16.183 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:15 vm10 podman[98477]: 2026-03-09 00:20:15.978156105 +0000 UTC m=+0.930030514 container died 168db582811136325fef9141e3c419dcb97015faa203644ea66af28aae4f8945 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, RELEASE=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, 
io.openshift.tags=base centos centos-stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_BRANCH=HEAD, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.openshift.expose-services=, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, maintainer=Guillaume Abrioux , architecture=x86_64, version=8, CEPH_POINT_RELEASE=-17.2.0, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, ceph=True, release=754) 2026-03-09T00:20:16.183 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98477]: 2026-03-09 00:20:16.004404643 +0000 UTC m=+0.956279052 container remove 168db582811136325fef9141e3c419dcb97015faa203644ea66af28aae4f8945 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, maintainer=Guillaume Abrioux , name=centos-stream, distribution-scope=public, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., release=754, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, version=8) 2026-03-09T00:20:16.183 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 bash[98477]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6 2026-03-09T00:20:16.183 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.151044823 +0000 UTC m=+0.017484456 container create b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-09T00:20:16.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:15 vm04.local ceph-mon[96438]: osd.6 marked itself down and dead 2026-03-09T00:20:16.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:15 vm04.local ceph-mon[94619]: osd.6 marked itself down and dead 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.195457168 +0000 UTC m=+0.061896801 container init b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default) 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.203065608 +0000 UTC m=+0.069505241 container start b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, io.buildah.version=1.41.3, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.207382087 +0000 UTC m=+0.073821720 container attach b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.144614157 +0000 UTC m=+0.011053800 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 conmon[98556]: conmon b47dc48cf94da9cebfaf : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de.scope/container/memory.events 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.333286516 +0000 UTC m=+0.199726149 container died b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98544]: 2026-03-09 00:20:16.359251984 +0000 UTC m=+0.225691617 container remove b47dc48cf94da9cebfaf83b36cae782de83c70a5563b48eb4d5328db305b71de (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, 
name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6.service: Deactivated successfully. 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6.service: Unit process 98556 (conmon) remains running after unit stopped. 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6.service: Unit process 98565 (podman) remains running after unit stopped. 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 systemd[1]: Stopped Ceph osd.6 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:20:16.461 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6.service: Consumed 29.865s CPU time, 184.6M memory peak. 2026-03-09T00:20:16.808 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 systemd[1]: Starting Ceph osd.6 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:20:16.809 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98654]: 2026-03-09 00:20:16.67078234 +0000 UTC m=+0.019484098 container create 0b955bd11e7facc0a8fe1052644f1d8151d04d1c811dc8291b47fc7a54ebf07f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-09T00:20:16.809 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98654]: 2026-03-09 00:20:16.727807652 +0000 UTC m=+0.076509410 container init 0b955bd11e7facc0a8fe1052644f1d8151d04d1c811dc8291b47fc7a54ebf07f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:20:16.809 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98654]: 2026-03-09 00:20:16.731201895 +0000 UTC m=+0.079903653 container start 0b955bd11e7facc0a8fe1052644f1d8151d04d1c811dc8291b47fc7a54ebf07f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T00:20:16.809 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98654]: 2026-03-09 00:20:16.74605231 +0000 UTC m=+0.094754068 container attach 0b955bd11e7facc0a8fe1052644f1d8151d04d1c811dc8291b47fc7a54ebf07f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate, 
org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, ceph=True, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:20:16.809 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 podman[98654]: 2026-03-09 00:20:16.662543851 +0000 UTC m=+0.011245609 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:20:17.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:16 vm10 ceph-mon[82076]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:17.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:16 vm10 ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:20:17.080 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:16 vm10 ceph-mon[82076]: osdmap e128: 8 total, 7 up, 8 in 2026-03-09T00:20:17.080 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.080 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 bash[98654]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.080 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.080 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:16 vm10 bash[98654]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:17.019Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:17.020Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:16 vm04.local ceph-mon[96438]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:16 vm04.local ceph-mon[96438]: Health check failed: 1 osds down 
(OSD_DOWN) 2026-03-09T00:20:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:16 vm04.local ceph-mon[96438]: osdmap e128: 8 total, 7 up, 8 in 2026-03-09T00:20:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:16 vm04.local ceph-mon[94619]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:16 vm04.local ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:20:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:16 vm04.local ceph-mon[94619]: osdmap e128: 8 total, 7 up, 8 in 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-39c5b10f-687e-49dc-b6ce-4a08b1b897a7/osd-block-e5c2cd5a-74db-44b2-8a4f-525ffaba40f9 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T00:20:17.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-39c5b10f-687e-49dc-b6ce-4a08b1b897a7/osd-block-e5c2cd5a-74db-44b2-8a4f-525ffaba40f9 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/ln -snf /dev/ceph-39c5b10f-687e-49dc-b6ce-4a08b1b897a7/osd-block-e5c2cd5a-74db-44b2-8a4f-525ffaba40f9 /var/lib/ceph/osd/ceph-6/block 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/ln -snf /dev/ceph-39c5b10f-687e-49dc-b6ce-4a08b1b897a7/osd-block-e5c2cd5a-74db-44b2-8a4f-525ffaba40f9 /var/lib/ceph/osd/ceph-6/block 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 
ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate[98668]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98654]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 podman[98654]: 2026-03-09 00:20:17.679442621 +0000 UTC m=+1.028144379 container died 0b955bd11e7facc0a8fe1052644f1d8151d04d1c811dc8291b47fc7a54ebf07f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate, org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default) 2026-03-09T00:20:17.938 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 podman[98654]: 2026-03-09 00:20:17.697096032 +0000 UTC m=+1.045797790 container remove 0b955bd11e7facc0a8fe1052644f1d8151d04d1c811dc8291b47fc7a54ebf07f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-activate, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:20:17.939 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:20:17 vm10 ceph-mon[82076]: osdmap e129: 8 total, 7 up, 8 in 2026-03-09T00:20:17.939 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:17.939 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:17.939 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:17 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:17.939 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 podman[98918]: 2026-03-09 00:20:17.792135744 +0000 UTC m=+0.016260506 container create d7d72f87911e430f122796c773eafe454c62ab1a31a005422e5aa62985e73b9a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:20:17.939 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 podman[98918]: 2026-03-09 00:20:17.82833186 +0000 UTC m=+0.052456622 container init d7d72f87911e430f122796c773eafe454c62ab1a31a005422e5aa62985e73b9a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS) 2026-03-09T00:20:17.939 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 podman[98918]: 2026-03-09 00:20:17.831478398 +0000 UTC m=+0.055603161 container start d7d72f87911e430f122796c773eafe454c62ab1a31a005422e5aa62985e73b9a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3) 2026-03-09T00:20:17.939 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 bash[98918]: d7d72f87911e430f122796c773eafe454c62ab1a31a005422e5aa62985e73b9a 2026-03-09T00:20:17.939 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 podman[98918]: 2026-03-09 00:20:17.785631529 +0000 UTC m=+0.009756291 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:20:17.939 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:17 vm10 systemd[1]: Started Ceph osd.6 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[94619]: osdmap e129: 8 total, 7 up, 8 in 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[96438]: osdmap e129: 8 total, 7 up, 8 in 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:17 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:19.117 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:18 vm10 ceph-mon[82076]: pgmap v143: 161 pgs: 7 active+undersized, 15 peering, 6 stale+active+clean, 3 active+undersized+degraded, 130 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 16/723 objects degraded (2.213%) 2026-03-09T00:20:19.117 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:18 vm10 ceph-mon[82076]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-09T00:20:19.117 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:18 vm10 ceph-mon[82076]: Health check failed: Degraded data redundancy: 16/723 objects degraded (2.213%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:19.117 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:18 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:20:18.920+0000 7f303386c740 -1 Falling back to public interface 2026-03-09T00:20:19.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:18 vm04.local ceph-mon[96438]: pgmap v143: 161 pgs: 7 active+undersized, 15 peering, 6 stale+active+clean, 3 active+undersized+degraded, 130 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 16/723 objects degraded (2.213%) 2026-03-09T00:20:19.351 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:18 vm04.local ceph-mon[96438]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-09T00:20:19.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:18 vm04.local ceph-mon[96438]: Health check failed: Degraded data redundancy: 16/723 objects degraded (2.213%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:18 vm04.local ceph-mon[94619]: pgmap v143: 161 pgs: 7 active+undersized, 15 peering, 6 stale+active+clean, 3 active+undersized+degraded, 130 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 16/723 objects degraded (2.213%) 2026-03-09T00:20:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:18 vm04.local ceph-mon[94619]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-09T00:20:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:18 vm04.local ceph-mon[94619]: Health check failed: Degraded data redundancy: 16/723 objects degraded (2.213%), 3 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:20.049 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:19 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:20:19.781+0000 7f303386c740 -1 osd.6 0 read_superblock omap replica is missing. 2026-03-09T00:20:20.049 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:19 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:20:19.822+0000 7f303386c740 -1 osd.6 127 log_to_monitors true 2026-03-09T00:20:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:20 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:20 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:20 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:20 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:20 vm10 ceph-mon[82076]: from='osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T00:20:20.329 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:20 vm10 ceph-mon[82076]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T00:20:20.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[96438]: from='osd.6 
[v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[96438]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[94619]: from='osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T00:20:20.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-mon[94619]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T00:20:20.828 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:20:20 vm10 ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:20:20.650+0000 7f302b617640 -1 osd.6 127 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:20:21.100 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:20 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:20:20.603+0000 7fe15b141640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: pgmap v144: 161 pgs: 17 active+undersized, 15 peering, 1 stale+active+clean, 10 active+undersized+degraded, 118 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 44/723 objects degraded (6.086%) 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: Upgrade: unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: osdmap e130: 8 total, 7 up, 8 in 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:21.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:21 vm10 ceph-mon[82076]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: pgmap v144: 161 pgs: 17 active+undersized, 15 peering, 1 stale+active+clean, 10 active+undersized+degraded, 118 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 44/723 objects degraded (6.086%) 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: Upgrade: unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: osdmap e130: 8 total, 7 up, 8 in 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:21.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[96438]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: pgmap v144: 161 pgs: 17 active+undersized, 15 peering, 1 stale+active+clean, 10 active+undersized+degraded, 118 active+clean; 457 KiB data, 231 MiB used, 160 GiB / 160 GiB avail; 44/723 objects degraded (6.086%) 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: Upgrade: unsafe to stop osd(s) at this time (17 PGs are or would become offline) 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: osdmap e130: 8 total, 7 up, 8 in 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:21.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:21 vm04.local ceph-mon[94619]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:22 vm10 ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:22 vm10 ceph-mon[82076]: osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500] boot 2026-03-09T00:20:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:22 vm10 ceph-mon[82076]: osdmap e131: 8 total, 8 up, 8 in 2026-03-09T00:20:22.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:22 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[96438]: osd.6 
[v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500] boot 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[96438]: osdmap e131: 8 total, 8 up, 8 in 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[94619]: osd.6 [v2:192.168.123.110:6816/2212508500,v1:192.168.123.110:6817/2212508500] boot 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[94619]: osdmap e131: 8 total, 8 up, 8 in 2026-03-09T00:20:22.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:22 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T00:20:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:23 vm10 ceph-mon[82076]: pgmap v147: 161 pgs: 36 active+undersized, 17 active+undersized+degraded, 108 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 84/723 objects degraded (11.618%) 2026-03-09T00:20:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:23 vm10 ceph-mon[82076]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-09T00:20:23.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:23 vm10 ceph-mon[82076]: osdmap e132: 8 total, 8 up, 8 in 2026-03-09T00:20:23.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:23 vm04.local ceph-mon[96438]: pgmap v147: 161 pgs: 36 active+undersized, 17 active+undersized+degraded, 108 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 84/723 objects degraded (11.618%) 2026-03-09T00:20:23.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:23 vm04.local ceph-mon[96438]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-09T00:20:23.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:23 vm04.local ceph-mon[96438]: osdmap e132: 8 total, 8 up, 8 in 2026-03-09T00:20:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:23 vm04.local ceph-mon[94619]: pgmap v147: 161 pgs: 36 active+undersized, 17 active+undersized+degraded, 108 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 84/723 objects degraded (11.618%) 2026-03-09T00:20:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:23 vm04.local ceph-mon[94619]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-09T00:20:23.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:23 vm04.local ceph-mon[94619]: osdmap e132: 8 total, 8 up, 8 in 2026-03-09T00:20:24.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:24 vm10 ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:24.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:24 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:24.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:24 vm04.local ceph-mon[94619]: 
from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:25.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:25 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:20:25] "GET /metrics HTTP/1.1" 200 37856 "" "Prometheus/2.51.0" 2026-03-09T00:20:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:25 vm10 ceph-mon[82076]: pgmap v149: 161 pgs: 26 active+undersized, 11 active+undersized+degraded, 124 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 55/723 objects degraded (7.607%) 2026-03-09T00:20:25.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:25 vm10 ceph-mon[82076]: Health check update: Degraded data redundancy: 55/723 objects degraded (7.607%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:25.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:25 vm04.local ceph-mon[96438]: pgmap v149: 161 pgs: 26 active+undersized, 11 active+undersized+degraded, 124 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 55/723 objects degraded (7.607%) 2026-03-09T00:20:25.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:25 vm04.local ceph-mon[96438]: Health check update: Degraded data redundancy: 55/723 objects degraded (7.607%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:25.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:25 vm04.local ceph-mon[94619]: pgmap v149: 161 pgs: 26 active+undersized, 11 active+undersized+degraded, 124 active+clean; 457 KiB data, 249 MiB used, 160 GiB / 160 GiB avail; 55/723 objects degraded (7.607%) 2026-03-09T00:20:25.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:25 vm04.local ceph-mon[94619]: Health check update: Degraded data redundancy: 55/723 objects degraded (7.607%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:27.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:27.021Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:27 vm10 ceph-mon[82076]: pgmap v150: 161 pgs: 6 active+undersized, 3 active+undersized+degraded, 152 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s; 17/723 objects degraded (2.351%) 2026-03-09T00:20:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:27.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:27 vm10 ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", 
"format": "json"}]: dispatch 2026-03-09T00:20:27.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-mon[96438]: pgmap v150: 161 pgs: 6 active+undersized, 3 active+undersized+degraded, 152 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s; 17/723 objects degraded (2.351%) 2026-03-09T00:20:27.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:27.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-mon[94619]: pgmap v150: 161 pgs: 6 active+undersized, 3 active+undersized+degraded, 152 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 0 op/s; 17/723 objects degraded (2.351%) 2026-03-09T00:20:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:27.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:27 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:28.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:28 vm04.local ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 17/723 objects degraded (2.351%), 3 pgs degraded) 2026-03-09T00:20:28.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:28 vm04.local ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:20:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:28 vm04.local ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 17/723 objects degraded (2.351%), 3 pgs degraded) 2026-03-09T00:20:28.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:28 vm04.local ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:20:28.874 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:28 vm10 ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 17/723 objects degraded (2.351%), 3 pgs degraded) 2026-03-09T00:20:28.874 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:28 vm10 ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:20:30.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:29 vm10.local ceph-mon[82076]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 562 B/s rd, 0 op/s 2026-03-09T00:20:30.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:29 vm04.local ceph-mon[96438]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 562 B/s rd, 0 op/s 2026-03-09T00:20:30.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:29 vm04.local ceph-mon[94619]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 562 B/s rd, 0 op/s 2026-03-09T00:20:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:31 vm10.local ceph-mon[82076]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 494 B/s rd, 0 op/s 2026-03-09T00:20:32.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:31 vm04.local ceph-mon[96438]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 
494 B/s rd, 0 op/s 2026-03-09T00:20:32.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:31 vm04.local ceph-mon[94619]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 494 B/s rd, 0 op/s 2026-03-09T00:20:34.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:33 vm10.local ceph-mon[82076]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T00:20:34.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:33 vm04.local ceph-mon[96438]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T00:20:34.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:33 vm04.local ceph-mon[94619]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T00:20:34.979 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:34 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:35.021 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:34 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:35.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:34 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:35.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:35 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:20:35] "GET /metrics HTTP/1.1" 200 37873 "" "Prometheus/2.51.0" 2026-03-09T00:20:35.904 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:35 vm10.local ceph-mon[82076]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 817 B/s rd, 0 op/s 2026-03-09T00:20:35.904 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:35 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:36.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:35 vm04.local ceph-mon[96438]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 817 B/s rd, 0 op/s 2026-03-09T00:20:36.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:35 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:36.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:35 vm04.local ceph-mon[94619]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 817 B/s rd, 0 op/s 2026-03-09T00:20:36.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:35 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:36.695 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-mon[82076]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:36.695 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-mon[82076]: Upgrade: osd.7 is safe to restart 2026-03-09T00:20:36.695 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:36.695 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T00:20:36.695 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[96438]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[96438]: Upgrade: osd.7 is safe to restart 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[94619]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[94619]: Upgrade: osd.7 is safe to restart 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T00:20:37.021 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:36 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:37.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:36 vm10.local systemd[1]: Stopping Ceph osd.7 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:20:37.079 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[60773]: 2026-03-09T00:20:36.800+0000 7f4c290d5700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:20:37.079 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[60773]: 2026-03-09T00:20:36.800+0000 7f4c290d5700 -1 osd.7 132 *** Got signal Terminated *** 2026-03-09T00:20:37.079 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:36 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[60773]: 2026-03-09T00:20:36.800+0000 7f4c290d5700 -1 osd.7 132 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:20:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:37.020Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:37.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:37.021Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:37.920 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:37 vm10.local ceph-mon[82076]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:37.920 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:37 vm10.local ceph-mon[82076]: Upgrade: Updating osd.7 2026-03-09T00:20:37.920 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:37 vm10.local ceph-mon[82076]: Deploying daemon osd.7 on vm10 2026-03-09T00:20:37.920 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:37 vm10.local ceph-mon[82076]: osd.7 marked itself down and dead 2026-03-09T00:20:37.920 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:37 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:37.921 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102642]: 2026-03-09 00:20:37.728360586 +0000 UTC m=+0.945841027 container died bc6bbac150799f5aef9865b2898dbcaa491fe970be78b5b9748b0a237e6768ea (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all 
of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , GIT_BRANCH=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, ceph=True, com.redhat.component=centos-stream-container, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, RELEASE=HEAD) 2026-03-09T00:20:37.921 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102642]: 2026-03-09 00:20:37.745096339 +0000 UTC m=+0.962576780 container remove bc6bbac150799f5aef9865b2898dbcaa491fe970be78b5b9748b0a237e6768ea (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, GIT_BRANCH=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, architecture=x86_64, vendor=Red Hat, Inc., io.openshift.expose-services=, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, io.k8s.display-name=CentOS Stream 8, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0) 2026-03-09T00:20:37.921 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local bash[102642]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7 2026-03-09T00:20:37.921 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102709]: 2026-03-09 00:20:37.898141906 +0000 UTC m=+0.018972450 container create db1e58d3c3dc632cdd960f9050dad8051aa79e956ce71924cb344dfc9fa7c72a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[96438]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[96438]: Upgrade: Updating osd.7 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[96438]: Deploying daemon osd.7 on vm10 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[96438]: osd.7 marked itself down and dead 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[94619]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[94619]: Upgrade: Updating osd.7 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[94619]: Deploying daemon osd.7 on vm10 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[94619]: osd.7 marked itself down and dead 2026-03-09T00:20:38.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:37 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102709]: 2026-03-09 00:20:37.93830597 +0000 UTC m=+0.059136524 container init db1e58d3c3dc632cdd960f9050dad8051aa79e956ce71924cb344dfc9fa7c72a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3) 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102709]: 2026-03-09 00:20:37.941308039 +0000 UTC m=+0.062138583 container start db1e58d3c3dc632cdd960f9050dad8051aa79e956ce71924cb344dfc9fa7c72a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102709]: 2026-03-09 00:20:37.947208031 +0000 UTC m=+0.068038565 container attach db1e58d3c3dc632cdd960f9050dad8051aa79e956ce71924cb344dfc9fa7c72a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:37 vm10.local podman[102709]: 2026-03-09 00:20:37.890473213 +0000 UTC m=+0.011303768 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 
vm10.local podman[102728]: 2026-03-09 00:20:38.08773491 +0000 UTC m=+0.011484046 container died db1e58d3c3dc632cdd960f9050dad8051aa79e956ce71924cb344dfc9fa7c72a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local podman[102728]: 2026-03-09 00:20:38.109709378 +0000 UTC m=+0.033458514 container remove db1e58d3c3dc632cdd960f9050dad8051aa79e956ce71924cb344dfc9fa7c72a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0) 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.7.service: Deactivated successfully. 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local systemd[1]: Stopped Ceph osd.7 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:20:38.213 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.7.service: Consumed 8.354s CPU time. 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local systemd[1]: Starting Ceph osd.7 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local podman[102816]: 2026-03-09 00:20:38.410222072 +0000 UTC m=+0.017174966 container create e461c83522d8849f7c9b5c6d365251d18d831da328a1638c7342397b206b5fff (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3) 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local podman[102816]: 2026-03-09 00:20:38.448456915 +0000 UTC m=+0.055409809 container init e461c83522d8849f7c9b5c6d365251d18d831da328a1638c7342397b206b5fff (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate, CEPH_REF=squid, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local podman[102816]: 2026-03-09 00:20:38.454448329 +0000 UTC m=+0.061401223 container start e461c83522d8849f7c9b5c6d365251d18d831da328a1638c7342397b206b5fff (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local podman[102816]: 2026-03-09 00:20:38.455609231 +0000 UTC m=+0.062562125 container attach e461c83522d8849f7c9b5c6d365251d18d831da328a1638c7342397b206b5fff (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate, 
org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local podman[102816]: 2026-03-09 00:20:38.403682653 +0000 UTC m=+0.010635557 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local bash[102816]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:38.579 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:38 vm10.local bash[102816]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:38.950 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:38 vm10.local ceph-mon[82076]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:20:38.950 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:38 vm10.local ceph-mon[82076]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T00:20:38.950 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:38 vm10.local ceph-mon[82076]: osdmap e133: 8 total, 7 up, 8 in 2026-03-09T00:20:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:38 vm04.local ceph-mon[96438]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:20:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:38 vm04.local ceph-mon[96438]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T00:20:39.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:38 vm04.local ceph-mon[96438]: osdmap e133: 8 total, 7 up, 8 in 2026-03-09T00:20:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:38 vm04.local ceph-mon[94619]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T00:20:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:38 vm04.local ceph-mon[94619]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T00:20:39.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:38 vm04.local ceph-mon[94619]: osdmap e133: 8 total, 7 up, 8 in 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 
vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bdf1c5ff-05e4-405b-ba68-f42630589767/osd-block-c2a6868a-a44a-4a09-a55c-d1145ef3d398 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-09T00:20:39.329 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bdf1c5ff-05e4-405b-ba68-f42630589767/osd-block-c2a6868a-a44a-4a09-a55c-d1145ef3d398 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-09T00:20:39.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-mon[82076]: pgmap v157: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:20:39.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-mon[82076]: osdmap e134: 8 total, 7 up, 8 in 2026-03-09T00:20:39.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:39.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:39.680 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/ln -snf /dev/ceph-bdf1c5ff-05e4-405b-ba68-f42630589767/osd-block-c2a6868a-a44a-4a09-a55c-d1145ef3d398 /var/lib/ceph/osd/ceph-7/block 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/ln -snf /dev/ceph-bdf1c5ff-05e4-405b-ba68-f42630589767/osd-block-c2a6868a-a44a-4a09-a55c-d1145ef3d398 /var/lib/ceph/osd/ceph-7/block 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 
vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate[102827]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[102816]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local podman[103037]: 2026-03-09 00:20:39.450918143 +0000 UTC m=+0.012485730 container died e461c83522d8849f7c9b5c6d365251d18d831da328a1638c7342397b206b5fff (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local podman[103037]: 2026-03-09 00:20:39.472522229 +0000 UTC m=+0.034089816 container remove e461c83522d8849f7c9b5c6d365251d18d831da328a1638c7342397b206b5fff (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release 
Team ) 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local podman[103077]: 2026-03-09 00:20:39.570327876 +0000 UTC m=+0.015860366 container create 09c94bcb8e3f593c1c5b80112a36801775641a4ac8d7c51dd2c1f5d601627576 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local podman[103077]: 2026-03-09 00:20:39.610231041 +0000 UTC m=+0.055763531 container init 09c94bcb8e3f593c1c5b80112a36801775641a4ac8d7c51dd2c1f5d601627576 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local podman[103077]: 2026-03-09 00:20:39.613170182 +0000 UTC m=+0.058702672 container start 09c94bcb8e3f593c1c5b80112a36801775641a4ac8d7c51dd2c1f5d601627576 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0) 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local bash[103077]: 09c94bcb8e3f593c1c5b80112a36801775641a4ac8d7c51dd2c1f5d601627576 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local podman[103077]: 2026-03-09 00:20:39.563693159 +0000 UTC m=+0.009225659 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c 
quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:20:39.680 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:39 vm10.local systemd[1]: Started Ceph osd.7 for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[96438]: pgmap v157: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[96438]: osdmap e134: 8 total, 7 up, 8 in 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[94619]: pgmap v157: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[94619]: osdmap e134: 8 total, 7 up, 8 in 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:40.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:39 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:40.705 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:40 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:20:40.449+0000 7faf2ad82740 -1 Falling back to public interface 2026-03-09T00:20:40.819 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:20:40.991 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:40 vm10.local ceph-mon[82076]: Health check failed: Reduced data availability: 2 pgs peering (PG_AVAILABILITY) 2026-03-09T00:20:40.991 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:40 vm10.local ceph-mon[82076]: Health check failed: Degraded data redundancy: 15/723 objects degraded (2.075%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:41.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:40 vm04.local ceph-mon[94619]: Health check failed: Reduced data availability: 2 pgs peering (PG_AVAILABILITY) 2026-03-09T00:20:41.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:40 vm04.local ceph-mon[94619]: Health check failed: Degraded data redundancy: 15/723 objects degraded (2.075%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:41.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:40 vm04.local ceph-mon[96438]: Health check failed: Reduced data availability: 2 pgs peering (PG_AVAILABILITY) 2026-03-09T00:20:41.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:40 vm04.local ceph-mon[96438]: 
Health check failed: Degraded data redundancy: 15/723 objects degraded (2.075%), 8 pgs degraded (PG_DEGRADED)
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (17m) 87s ago 24m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (5m) 0s ago 23m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (5m) 87s ago 23m 51.9M - 3.5 e1d6a67b021e cdb4168e72eb
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (5m) 0s ago 25m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (14m) 87s ago 26m 557M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (4m) 87s ago 26m 53.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (4m) 0s ago 26m 48.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (3m) 87s ago 26m 41.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (17m) 87s ago 24m 9.78M - 1.7.0 72c9c2088986 38e0af6b2fbf
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (17m) 0s ago 24m 10.2M - 1.7.0 72c9c2088986 d059c0022310
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (3m) 87s ago 25m 70.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (2m) 87s ago 25m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (2m) 87s ago 25m 45.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (88s) 87s ago 25m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (67s) 0s ago 25m 72.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (45s) 0s ago 24m 49.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (23s) 0s ago 24m 47.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (1s) 0s ago 24m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (5m) 0s ago 24m 52.4M - 2.51.0 1d3b7f56885b 1f53121cfa7f
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (23m) 87s ago 23m 100M - 17.2.0 e1d6a67b021e a815abb0c790
2026-03-09T00:20:41.260 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (23m) 0s ago 23m 98.8M - 17.2.0 e1d6a67b021e f6412acdf6e0
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "mon": {
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": {
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "osd": {
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:20:41.525 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": {
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout: },
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout: "overall": {
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2,
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 12
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout: }
2026-03-09T00:20:41.526 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-03-09T00:20:41.762 INFO:teuthology.orchestra.run.vm04.stdout:{
2026-03-09T00:20:41.762 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc",
2026-03-09T00:20:41.762 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true,
2026-03-09T00:20:41.762 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons of type(s) crash,osd",
2026-03-09T00:20:41.762 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [
2026-03-09T00:20:41.763 INFO:teuthology.orchestra.run.vm04.stdout: "osd"
2026-03-09T00:20:41.763 INFO:teuthology.orchestra.run.vm04.stdout: ],
2026-03-09T00:20:41.763 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "8/8 daemons upgraded",
2026-03-09T00:20:41.763 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Currently upgrading osd daemons",
2026-03-09T00:20:41.763 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false
2026-03-09T00:20:41.763 INFO:teuthology.orchestra.run.vm04.stdout:}
2026-03-09T00:20:41.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: pgmap v159: 161 pgs: 9 active+undersized, 10 peering, 16 stale+active+clean, 8 active+undersized+degraded, 118 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 15/723 objects degraded (2.075%)
2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='client.44400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": 
["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2137542891' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[96438]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: pgmap v159: 161 pgs: 9 active+undersized, 10 peering, 16 stale+active+clean, 8 active+undersized+degraded, 118 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 15/723 objects degraded (2.075%) 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='client.44400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/2137542891' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:41.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:41 vm04.local ceph-mon[94619]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: pgmap v159: 161 pgs: 9 active+undersized, 10 peering, 16 stale+active+clean, 8 active+undersized+degraded, 118 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 15/723 objects degraded (2.075%) 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='client.44400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/2137542891' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:41.893 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-mon[82076]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T00:20:41.893 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:20:41.594+0000 7faf2ad82740 -1 osd.7 0 read_superblock omap replica is missing. 
2026-03-09T00:20:41.893 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:41 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:20:41.614+0000 7faf2ad82740 -1 osd.7 132 log_to_monitors true 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='client.54400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='client.44406 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='client.54415 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: osdmap e135: 8 total, 7 up, 8 in 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:43.053 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local 
ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 
vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='client.54400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='client.44406 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='client.54415 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local 
ceph-mon[96438]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: osdmap e135: 8 total, 7 up, 8 in 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-09T00:20:43.054 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-09T00:20:43.055 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-09T00:20:43.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='client.54400 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='client.44406 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='client.54415 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 
00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: osdmap e135: 8 total, 7 up, 8 in 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm10", "root=default"]}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": 
"config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": 
"container_image", "who": "osd.7"}]': finished 2026-03-09T00:20:43.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-09T00:20:43.751 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:20:43.435+0000 7faf2232c640 -1 osd.7 132 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: pgmap v160: 161 pgs: 39 active+undersized, 10 peering, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all osd 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: Upgrade: Setting require_osd_release to 19 squid 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469] boot 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: osdmap e136: 8 total, 8 up, 8 in 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 
2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:20:44.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:43 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: pgmap v160: 161 pgs: 39 active+undersized, 10 peering, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all osd 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: Upgrade: Setting require_osd_release to 19 squid 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469] boot 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: osdmap e136: 8 total, 8 up, 8 in 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local 
ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: pgmap v160: 161 pgs: 39 active+undersized, 10 peering, 20 active+undersized+degraded, 92 active+clean; 457 KiB data, 253 MiB used, 160 GiB / 160 GiB avail; 76/723 objects degraded (10.512%) 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all osd 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: Upgrade: Setting require_osd_release to 19 squid 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: osd.7 [v2:192.168.123.110:6824/1320266469,v1:192.168.123.110:6825/1320266469] boot 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: osdmap e136: 8 total, 8 up, 8 in 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:20:44.102 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:20:44.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:43 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T00:20:44.753 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:44.754 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: OSD bench result of 37169.178054 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T00:20:44.754 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mds 2026-03-09T00:20:44.754 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:20:44.754 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:20:44.754 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all nfs 2026-03-09T00:20:44.754 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: OSD bench result of 37169.178054 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mds 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all nfs 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Finalizing container_image settings 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:20:45.023 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:20:45.024 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: Upgrade: Complete! 
2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[96438]: osdmap e137: 8 total, 8 up, 8 in 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Finalizing container_image settings 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:20:45.024 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: 
dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: Upgrade: Complete! 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:45.024 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:45.025 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:45.025 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:45.025 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:45.025 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:44 vm04.local ceph-mon[94619]: osdmap e137: 8 total, 8 up, 8 in 2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: OSD bench result of 37169.178054 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
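The osd.7 message just above is the mclock scheduler declining to trust the automatic osd bench result: 37169 IOPS falls outside the 50-500 IOPS sanity window, so the capacity estimate stays at 315 IOPS and the daemon recommends measuring the device with an external benchmark (e.g. fio) and overriding osd_mclock_max_capacity_iops_[hdd|ssd]. A minimal sketch of that override, assuming the device behind osd.7 is rotational and that the value 450 stands in for an actually measured number (it is a placeholder, not a result from this run):

    # Sketch: pin osd.7's mclock IOPS capacity to an externally measured value.
    # 450 is a placeholder; substitute the IOPS figure obtained from fio.
    ceph config set osd.7 osd_mclock_max_capacity_iops_hdd 450
    # Confirm the override is now stored in the config database.
    ceph config get osd.7 osd_mclock_max_capacity_iops_hdd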
2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mds 2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all nfs 2026-03-09T00:20:45.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Finalizing container_image settings 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:20:45.079 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: Upgrade: Complete! 
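mon.a, mon.b and mon.c each record the same mgr-forwarded commands because config changes are committed through the monitor quorum, so the journals captured by teuthology repeat the finalization three times. Once "Upgrade: Complete!" is logged, the mgr also deletes its persisted mgr/cephadm/upgrade_state key (the config-key del entries above). A minimal sketch for confirming that end state from the admin shell, assuming jq is available; relying on config-key exists returning a non-zero exit status for a missing key is an assumption of the sketch:

    # Sketch: confirm the staggered upgrade has fully finished.
    ceph orch upgrade status | jq -e '.in_progress == false'
    # The persisted state key should be gone, so `exists` should now fail.
    if ceph config-key exists mgr/cephadm/upgrade_state; then
        echo "upgrade state still present" >&2
    fi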
2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:45.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:44 vm10.local ceph-mon[82076]: osdmap e137: 8 total, 8 up, 8 in 2026-03-09T00:20:45.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:45 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:20:45] "GET /metrics HTTP/1.1" 200 37946 "" "Prometheus/2.51.0" 2026-03-09T00:20:46.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:45 vm10.local ceph-mon[82076]: pgmap v163: 161 pgs: 43 active+undersized, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 94/723 objects degraded (13.001%) 2026-03-09T00:20:46.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:45 vm10.local ceph-mon[82076]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs peering) 2026-03-09T00:20:46.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:45 vm04.local ceph-mon[94619]: pgmap v163: 161 pgs: 43 active+undersized, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 94/723 objects degraded (13.001%) 2026-03-09T00:20:46.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:45 vm04.local ceph-mon[94619]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs peering) 
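Right after the last OSDs restart into the new image, a handful of PGs briefly report undersized or degraded while they re-peer, and the transient PG_AVAILABILITY warning clears within a few seconds, as the surrounding entries show. When scripting around such an upgrade it can help to block until the cluster settles before running the next assertion; a minimal sketch of that wait, with an arbitrary five-minute ceiling:

    # Sketch: wait up to ~5 minutes for HEALTH_OK after the OSD restarts.
    for i in $(seq 1 60); do
        ceph health | grep -q HEALTH_OK && break
        sleep 5
    done
    # Show any remaining health detail once the loop exits.
    ceph health detail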
2026-03-09T00:20:46.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:45 vm04.local ceph-mon[96438]: pgmap v163: 161 pgs: 43 active+undersized, 26 active+undersized+degraded, 92 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 94/723 objects degraded (13.001%) 2026-03-09T00:20:46.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:45 vm04.local ceph-mon[96438]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 2 pgs peering) 2026-03-09T00:20:47.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:46 vm10.local ceph-mon[82076]: pgmap v165: 161 pgs: 4 peering, 17 active+undersized, 16 active+undersized+degraded, 124 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 65/723 objects degraded (8.990%) 2026-03-09T00:20:47.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:46 vm10.local ceph-mon[82076]: Health check update: Degraded data redundancy: 65/723 objects degraded (8.990%), 16 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:47.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:47.021Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:47.101 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:47.022Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:47.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:46 vm04.local ceph-mon[94619]: pgmap v165: 161 pgs: 4 peering, 17 active+undersized, 16 active+undersized+degraded, 124 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 65/723 objects degraded (8.990%) 2026-03-09T00:20:47.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:46 vm04.local ceph-mon[94619]: Health check update: Degraded data redundancy: 65/723 objects degraded (8.990%), 16 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:47.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:46 vm04.local ceph-mon[96438]: pgmap v165: 161 pgs: 4 peering, 17 active+undersized, 16 active+undersized+degraded, 124 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 65/723 objects degraded (8.990%) 2026-03-09T00:20:47.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:46 vm04.local ceph-mon[96438]: Health check update: Degraded data redundancy: 65/723 objects degraded (8.990%), 16 pgs degraded (PG_DEGRADED) 2026-03-09T00:20:48.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:48 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:48.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:48 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:48.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:48 vm04.local ceph-mon[96438]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:49.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:49 vm10.local ceph-mon[82076]: pgmap v166: 161 pgs: 4 peering, 3 active+undersized, 4 active+undersized+degraded, 150 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 9/723 objects degraded (1.245%) 2026-03-09T00:20:49.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:49 vm04.local ceph-mon[96438]: pgmap v166: 161 pgs: 4 peering, 3 active+undersized, 4 active+undersized+degraded, 150 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 9/723 objects degraded (1.245%) 2026-03-09T00:20:49.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:49 vm04.local ceph-mon[94619]: pgmap v166: 161 pgs: 4 peering, 3 active+undersized, 4 active+undersized+degraded, 150 active+clean; 457 KiB data, 275 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s; 9/723 objects degraded (1.245%) 2026-03-09T00:20:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:50 vm10.local ceph-mon[82076]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 9/723 objects degraded (1.245%), 4 pgs degraded) 2026-03-09T00:20:50.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:50 vm10.local ceph-mon[82076]: Cluster is now healthy 2026-03-09T00:20:50.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:50 vm04.local ceph-mon[96438]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 9/723 objects degraded (1.245%), 4 pgs degraded) 2026-03-09T00:20:50.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:50 vm04.local ceph-mon[96438]: Cluster is now healthy 2026-03-09T00:20:50.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:50 vm04.local ceph-mon[94619]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 9/723 objects degraded (1.245%), 4 pgs degraded) 2026-03-09T00:20:50.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:50 vm04.local ceph-mon[94619]: Cluster is now healthy 2026-03-09T00:20:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:51 vm10.local ceph-mon[82076]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 689 B/s rd, 0 op/s 2026-03-09T00:20:51.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:51 vm04.local ceph-mon[96438]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 689 B/s rd, 0 op/s 2026-03-09T00:20:51.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:51 vm04.local ceph-mon[94619]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 689 B/s rd, 0 op/s 2026-03-09T00:20:53.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:53 vm10.local ceph-mon[82076]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:53.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:53 vm04.local ceph-mon[96438]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:53.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:53 vm04.local ceph-mon[94619]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:20:54.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:54 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", 
"format": "json"}]: dispatch 2026-03-09T00:20:54.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:54 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:54.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:54 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:20:55.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:20:55 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:20:55] "GET /metrics HTTP/1.1" 200 37946 "" "Prometheus/2.51.0" 2026-03-09T00:20:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:55 vm10.local ceph-mon[82076]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:20:55.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:55 vm04.local ceph-mon[96438]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:20:55.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:55 vm04.local ceph-mon[94619]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:20:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:57.022Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:20:57.023Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:20:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:57 vm10.local ceph-mon[82076]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:20:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:57 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:57 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:57 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[96438]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[96438]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[94619]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:20:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:57 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:20:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:20:59 vm10.local ceph-mon[82076]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:20:59.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:20:59 vm04.local ceph-mon[96438]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:20:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:20:59 vm04.local ceph-mon[94619]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:01.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:01 vm10.local ceph-mon[82076]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:01.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:01 vm04.local ceph-mon[96438]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:01.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:01 vm04.local ceph-mon[94619]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:03.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:03 vm10.local ceph-mon[82076]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:03.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:03 vm04.local ceph-mon[96438]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:03 vm04.local ceph-mon[94619]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:04.514 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:04 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:04.149Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 
err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:04.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:04 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:04.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:04 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:04.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:04 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:05.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:21:05 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:21:05] "GET /metrics HTTP/1.1" 200 37976 "" "Prometheus/2.51.0" 2026-03-09T00:21:05.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:05 vm10.local ceph-mon[82076]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:05.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:05 vm04.local ceph-mon[96438]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:05.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:05 vm04.local ceph-mon[94619]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:07.023Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:07.024Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:07.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:07 vm10.local ceph-mon[82076]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:07.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:07 vm04.local ceph-mon[96438]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:07 vm04.local ceph-mon[94619]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:09.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:09 vm10.local ceph-mon[82076]: pgmap v176: 161 pgs: 
161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:09.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:09 vm04.local ceph-mon[96438]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:09.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:09 vm04.local ceph-mon[94619]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:11.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:11 vm10.local ceph-mon[82076]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:11.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:11 vm04.local ceph-mon[94619]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:11.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:11 vm04.local ceph-mon[96438]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:12.033 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (17m) 118s ago 24m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (5m) 31s ago 24m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (5m) 118s ago 24m 51.9M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (5m) 31s ago 26m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (15m) 118s ago 27m 557M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (4m) 118s ago 27m 53.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (5m) 31s ago 26m 48.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (4m) 118s ago 26m 41.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (18m) 118s ago 24m 9.78M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (17m) 31s ago 24m 10.2M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (3m) 118s ago 26m 70.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (3m) 118s ago 26m 49.3M 4096M 
19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:21:12.517 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (2m) 118s ago 25m 45.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (119s) 118s ago 25m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (98s) 31s ago 25m 72.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (76s) 31s ago 25m 49.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (54s) 31s ago 25m 47.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (32s) 31s ago 25m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (5m) 31s ago 24m 52.4M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (24m) 118s ago 24m 100M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:21:12.518 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (24m) 31s ago 24m 98.8M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:21:12.583 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | length == 1'"'"'' 2026-03-09T00:21:12.775 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:12.776 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:12.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:12 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:13.330 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:21:13.575 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.osd | keys'"'"' | grep $sha1' 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[94619]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[94619]: from='client.44421 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[94619]: 
from='client.54427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/4176864997' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[96438]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[96438]: from='client.44421 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[96438]: from='client.54427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:13.597 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:13 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/4176864997' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:13 vm10.local ceph-mon[82076]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:13 vm10.local ceph-mon[82076]: from='client.44421 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:13 vm10.local ceph-mon[82076]: from='client.54427 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:13.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:13 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/4176864997' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:14.098 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T00:21:14.141 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:21:14.369 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:14 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:14.149Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:14.720 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:14 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:14.721 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:14 vm04.local ceph-mon[96438]: from='client.? 
192.168.123.104:0/3586255036' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:14.721 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:14 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:14.721 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:14 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/3586255036' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:21:14.721 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:21:14.808 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:21:14.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:14 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:14.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:14 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/3586255036' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:15.346 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:21:15] "GET /metrics HTTP/1.1" 200 37973 "" "Prometheus/2.51.0" 2026-03-09T00:21:15.346 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:21:15.412 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo' 2026-03-09T00:21:15.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-mon[94619]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:15.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-mon[94619]: from='client.54445 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:15.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/2774763162' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:15.598 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-mon[96438]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:15.598 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-mon[96438]: from='client.54445 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:15.598 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:15 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2774763162' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:15.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:15 vm10.local ceph-mon[82076]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:15.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:15 vm10.local ceph-mon[82076]: from='client.54445 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:15.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:15 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/2774763162' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:17.025Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:17.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:17.027Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:17.609 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:17.675 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:17 vm04.local ceph-mon[94619]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:17.675 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:17 vm04.local ceph-mon[94619]: from='client.44448 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:17.676 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:17 vm04.local ceph-mon[96438]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:17.676 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:17 vm04.local ceph-mon[96438]: from='client.44448 -' entity='client.admin' 
cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:17.679 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done' 2026-03-09T00:21:17.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:17 vm10.local ceph-mon[82076]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:17.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:17 vm10.local ceph-mon[82076]: from='client.44448 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "services": "rgw.foo", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:18.200 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (18m) 2m ago 24m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (5m) 37s ago 24m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (5m) 2m ago 24m 51.9M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (5m) 37s ago 26m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (15m) 2m ago 27m 557M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (4m) 2m ago 27m 53.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (5m) 37s ago 26m 48.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (4m) 2m ago 26m 41.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (18m) 2m ago 24m 9.78M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (17m) 37s ago 24m 10.2M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (4m) 2m ago 26m 70.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (3m) 2m ago 26m 49.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (2m) 2m ago 26m 45.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 
69a18f90367f 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (2m) 2m ago 25m 13.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (104s) 37s ago 25m 72.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (82s) 37s ago 25m 49.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (60s) 37s ago 25m 47.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (39s) 37s ago 25m 13.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (5m) 37s ago 24m 52.4M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (24m) 2m ago 24m 100M - 17.2.0 e1d6a67b021e a815abb0c790 2026-03-09T00:21:18.587 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (24m) 37s ago 24m 98.8M - 17.2.0 e1d6a67b021e f6412acdf6e0 2026-03-09T00:21:18.833 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 13 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:21:18.834 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:21:18.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:18.851 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:18.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[94619]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:18.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:18 vm04.local ceph-mon[96438]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:19.038 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:21:19.038 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:21:19.038 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:21:19.038 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading daemons in service(s) rgw.foo", 2026-03-09T00:21:19.039 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:21:19.039 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:21:19.039 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:21:19.039 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:21:19.039 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: Upgrade: Started with target 
quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:18 vm10.local ceph-mon[82076]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:19.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='client.34469 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='client.54460 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='client.44460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/1436069598' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='client.34469 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='client.54460 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='client.44460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='client.? 
192.168.123.104:0/1436069598' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:19.852 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:19 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='client.34469 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='client.54460 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='client.44460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/1436069598' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:20.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:19 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: from='client.34490 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mgr 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mon 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all 
crash 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all osd 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mds 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Upgrade: Updating rgw.foo.vm04.ehrfsf (1/2) 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:21.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:20 vm10.local ceph-mon[82076]: Deploying daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: from='client.34490 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mgr 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mon 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all crash 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all osd 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mds 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Upgrade: Updating rgw.foo.vm04.ehrfsf (1/2) 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:21:21.102 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[94619]: Deploying daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: from='client.34490 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:21:21.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mgr 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mon 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all crash 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all osd 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mds 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Upgrade: Updating rgw.foo.vm04.ehrfsf (1/2) 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm04.ehrfsf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:21.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:20 vm04.local ceph-mon[96438]: Deploying daemon rgw.foo.vm04.ehrfsf on vm04 2026-03-09T00:21:21.822 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:21 vm10.local ceph-mon[82076]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:21.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:21 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:21 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:21 vm10.local ceph-mon[82076]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:21.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:21 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:21:21.823 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:21 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:22.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[96438]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[94619]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm10.dwizvi", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T00:21:22.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:21 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:22.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:22 vm10.local ceph-mon[82076]: Upgrade: Updating rgw.foo.vm10.dwizvi (2/2) 2026-03-09T00:21:22.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:22 vm10.local ceph-mon[82076]: Deploying daemon rgw.foo.vm10.dwizvi on vm10 2026-03-09T00:21:22.829 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:22 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:22.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:22 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:23.075 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[96438]: Upgrade: Updating rgw.foo.vm10.dwizvi (2/2) 2026-03-09T00:21:23.075 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[96438]: Deploying daemon rgw.foo.vm10.dwizvi on vm10 2026-03-09T00:21:23.075 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:23.075 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:23.075 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[94619]: Upgrade: Updating rgw.foo.vm10.dwizvi (2/2) 2026-03-09T00:21:23.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[94619]: Deploying daemon rgw.foo.vm10.dwizvi on vm10 2026-03-09T00:21:23.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:23.076 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:22 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:23.981 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:23 vm10.local ceph-mon[82076]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 17 op/s 2026-03-09T00:21:23.981 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:23 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:24.007 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:23 vm04.local ceph-mon[94619]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 17 op/s 2026-03-09T00:21:24.007 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:23 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:24.007 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:23 vm04.local ceph-mon[96438]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 279 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 17 op/s 2026-03-09T00:21:24.007 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:23 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.854 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:24.855 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:24 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.018 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:24 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:25.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:21:25] "GET /metrics HTTP/1.1" 200 37973 "" "Prometheus/2.51.0" 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm04.ehrfsf"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm04.ehrfsf"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm10.dwizvi"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm10.dwizvi"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local 
ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:26.079 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:25 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:26.101 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm04.ehrfsf"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm04.ehrfsf"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm10.dwizvi"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm10.dwizvi"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local 
ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 
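The ceph orch ps / ceph versions / ceph orch upgrade status output above comes from the while-loop dispatched in the cephadm shell command at 2026-03-09T00:21:17.679; the nested '"'"' quoting makes it hard to read in the log, so here is the same loop unescaped (a readability rendering only; apart from quoting, layout, and the comments, nothing is added):

    while ceph orch upgrade status | jq '.in_progress' | grep true \
          && ! ceph orch upgrade status | jq '.message' | grep Error ; do
        ceph orch ps              # daemon inventory (rgw.foo.* still on 17.2.0 above)
        ceph versions             # per-daemon-type version counts
        ceph orch upgrade status  # JSON with in_progress / message, as printed above
        sleep 30
    done

The loop exits once in_progress stops being true or the status message mentions an Error. The "config rm ... container_image ..." and "config-key del mgr/cephadm/upgrade_state" entries in the surrounding journalctl output show the mgr removing its per-daemon-type image overrides and clearing the stored upgrade state after both rgw.foo daemons have been redeployed on the target image.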
2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": 
"container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:21:26.102 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:26.103 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 283 MiB used, 160 GiB / 160 GiB avail; 43 KiB/s rd, 0 B/s wr, 66 op/s 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm04.ehrfsf"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm04.ehrfsf"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm10.dwizvi"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm10.dwizvi"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.crash"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mds"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mds"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rbd-mirror"}]': finished 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.103 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.ceph-exporter"}]': finished 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.iscsi"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nfs"}]': finished 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.nvmeof"}]': finished 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 
192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd='[{"prefix":"config-key del","key":"mgr/cephadm/upgrade_state"}]': finished 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:26.104 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:25 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all rgw 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all nfs 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Finalizing container_image settings 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Upgrade: Complete! 
2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: Checking dashboard <-> RGW credentials 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:27.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:26 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:27.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:27.026Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:27.100 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:27.027Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all rgw 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all nfs 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Finalizing container_image settings 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Upgrade: Complete! 
2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: Checking dashboard <-> RGW credentials 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all rgw 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all nfs 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all nvmeof 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Finalizing container_image settings 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Upgrade: Complete! 
2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: Checking dashboard <-> RGW credentials 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:27.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:26 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:28.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:27 vm10.local ceph-mon[82076]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 129 KiB/s rd, 170 B/s wr, 203 op/s 2026-03-09T00:21:28.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:27 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:28.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:27 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:28.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-mon[96438]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 129 KiB/s rd, 170 B/s wr, 203 op/s 2026-03-09T00:21:28.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:28.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:28.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-mon[94619]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 287 MiB used, 160 GiB / 160 GiB avail; 129 KiB/s rd, 170 B/s wr, 203 op/s 2026-03-09T00:21:28.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:28.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:27 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:30.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:29 vm10.local ceph-mon[82076]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 156 KiB/s rd, 170 B/s wr, 246 op/s 2026-03-09T00:21:30.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:29 vm04.local ceph-mon[96438]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 156 KiB/s rd, 170 B/s wr, 246 op/s 
2026-03-09T00:21:30.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:29 vm04.local ceph-mon[94619]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 156 KiB/s rd, 170 B/s wr, 246 op/s 2026-03-09T00:21:32.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:31 vm10.local ceph-mon[82076]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 158 KiB/s rd, 170 B/s wr, 249 op/s 2026-03-09T00:21:32.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:31 vm04.local ceph-mon[96438]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 158 KiB/s rd, 170 B/s wr, 249 op/s 2026-03-09T00:21:32.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:31 vm04.local ceph-mon[94619]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 158 KiB/s rd, 170 B/s wr, 249 op/s 2026-03-09T00:21:33.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:32 vm10.local ceph-mon[82076]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 159 KiB/s rd, 170 B/s wr, 250 op/s 2026-03-09T00:21:33.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:32 vm04.local ceph-mon[96438]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 159 KiB/s rd, 170 B/s wr, 250 op/s 2026-03-09T00:21:33.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:32 vm04.local ceph-mon[94619]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 159 KiB/s rd, 170 B/s wr, 250 op/s 2026-03-09T00:21:34.229 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:33 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:34.252 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:33 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:34.252 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:33 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:35.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:34 vm10.local ceph-mon[82076]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 148 KiB/s rd, 170 B/s wr, 233 op/s 2026-03-09T00:21:35.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:34 vm04.local ceph-mon[94619]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 148 KiB/s rd, 170 B/s wr, 233 op/s 2026-03-09T00:21:35.351 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:21:35 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:21:35] "GET /metrics HTTP/1.1" 200 38004 "" "Prometheus/2.51.0" 2026-03-09T00:21:35.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:34 vm04.local ceph-mon[96438]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 148 KiB/s rd, 170 B/s wr, 233 op/s 2026-03-09T00:21:37.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:36 vm10.local ceph-mon[82076]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 117 KiB/s rd, 170 B/s wr, 184 op/s 2026-03-09T00:21:37.350 
INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:37.027Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:37.028Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:37.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:36 vm04.local ceph-mon[96438]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 117 KiB/s rd, 170 B/s wr, 184 op/s 2026-03-09T00:21:37.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:36 vm04.local ceph-mon[94619]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 117 KiB/s rd, 170 B/s wr, 184 op/s 2026-03-09T00:21:39.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:39 vm10.local ceph-mon[82076]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 47 op/s 2026-03-09T00:21:39.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:39 vm04.local ceph-mon[96438]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 47 op/s 2026-03-09T00:21:39.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:38 vm04.local ceph-mon[94619]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 47 op/s 2026-03-09T00:21:41.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:41 vm10.local ceph-mon[82076]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 0 B/s wr, 4 op/s 2026-03-09T00:21:41.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:41 vm04.local ceph-mon[96438]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 0 B/s wr, 4 op/s 2026-03-09T00:21:41.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:41 vm04.local ceph-mon[94619]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 0 B/s wr, 4 op/s 2026-03-09T00:21:42.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:42.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:42.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 
2026-03-09T00:21:43.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:43 vm10.local ceph-mon[82076]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.9 KiB/s rd, 2 op/s 2026-03-09T00:21:43.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:43 vm04.local ceph-mon[96438]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.9 KiB/s rd, 2 op/s 2026-03-09T00:21:43.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:43 vm04.local ceph-mon[94619]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.9 KiB/s rd, 2 op/s 2026-03-09T00:21:44.276 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:44 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:44.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:44 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:44.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:44 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:45.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:45 vm10.local ceph-mon[82076]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:45.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:21:45 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:21:45] "GET /metrics HTTP/1.1" 200 38005 "" "Prometheus/2.51.0" 2026-03-09T00:21:45.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:45 vm04.local ceph-mon[96438]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:45.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:45 vm04.local ceph-mon[94619]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:47.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:47 vm10.local ceph-mon[82076]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:47.029Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:47.030Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:47.350 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:47 vm04.local ceph-mon[96438]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:47.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:47 vm04.local ceph-mon[94619]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:49.282 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:49 vm04.local ceph-mon[94619]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:49.282 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:49 vm04.local ceph-mon[96438]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:49.308 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:21:49.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:49 vm10.local ceph-mon[82076]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (18m) 25s ago 25m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (6m) 25s ago 24m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (6m) 25s ago 24m 52.4M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (6m) 25s ago 27m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (15m) 25s ago 27m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (5m) 25s ago 27m 60.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (5m) 25s ago 27m 50.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (5m) 25s ago 27m 48.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (18m) 25s ago 25m 9.79M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (18m) 25s ago 25m 10.3M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (4m) 25s ago 26m 77.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (4m) 25s ago 26m 56.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:21:49.799 
INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (3m) 25s ago 26m 50.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (2m) 25s ago 26m 74.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (2m) 25s ago 26m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (113s) 25s ago 26m 50.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (92s) 25s ago 25m 50.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (70s) 25s ago 25m 70.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (6m) 25s ago 25m 52.6M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (29s) 25s ago 24m 99.2M - 19.2.3-678-ge911bdeb 654f31e6858e 55cfeda28e66 2026-03-09T00:21:49.799 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (27s) 25s ago 24m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e 1c9761d360d7 2026-03-09T00:21:49.848 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.rgw | length == 1'"'"'' 2026-03-09T00:21:50.075 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:50 vm04.local ceph-mon[94619]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:50.075 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:50 vm04.local ceph-mon[94619]: from='client.54541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:50.075 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:50 vm04.local ceph-mon[96438]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:50.075 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:50 vm04.local ceph-mon[96438]: from='client.54541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:50.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:50 vm10.local ceph-mon[82076]: from='client.44532 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:50.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:50 vm10.local ceph-mon[82076]: from='client.54541 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:50.355 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:21:50.405 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e 
'"'"'.rgw | keys'"'"' | grep $sha1' 2026-03-09T00:21:50.933 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T00:21:50.990 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:21:51.186 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:51 vm04.local ceph-mon[96438]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:51.186 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:51 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/1068162814' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:51.186 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:51 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/978961315' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:51.187 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:51 vm04.local ceph-mon[94619]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:51.187 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:51 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/1068162814' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:51.187 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:51 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/978961315' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:51.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:51 vm10.local ceph-mon[82076]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:51.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:51 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/1068162814' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:51.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:51 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/978961315' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": null, 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": false, 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "which": "", 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "progress": null, 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "message": "", 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:21:51.561 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:21:51.663 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:21:52.101 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:52 vm04.local ceph-mon[96438]: from='client.44547 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:52.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:52 vm04.local ceph-mon[94619]: from='client.44547 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:52.195 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:21:52.238 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1' 2026-03-09T00:21:52.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:52 vm10.local ceph-mon[82076]: from='client.44547 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:52.724 INFO:teuthology.orchestra.run.vm04.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:52.785 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T00:21:52.788 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm04.local 2026-03-09T00:21:52.788 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done' 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/1603441788' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='client.34568 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:53.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:53 vm10.local ceph-mon[82076]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/1603441788' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='client.34568 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[94619]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='client.? 
192.168.123.104:0/1603441788' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='client.34568 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:53.339 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:53 vm04.local ceph-mon[96438]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T00:21:53.340 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (18m) 29s ago 25m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (6m) 29s ago 24m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (6m) 29s ago 24m 52.4M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (6m) 29s ago 27m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (15m) 29s ago 27m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (5m) 29s ago 27m 60.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (5m) 29s ago 27m 50.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (5m) 29s ago 27m 48.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (18m) 29s ago 25m 9.79M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:21:53.721 
INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (18m) 29s ago 25m 10.3M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (4m) 29s ago 26m 77.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (4m) 29s ago 26m 56.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (3m) 29s ago 26m 50.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (2m) 29s ago 26m 74.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (2m) 29s ago 26m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (117s) 29s ago 26m 50.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (95s) 29s ago 25m 50.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (74s) 29s ago 25m 70.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (6m) 29s ago 25m 52.6M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (32s) 29s ago 24m 99.2M - 19.2.3-678-ge911bdeb 654f31e6858e 55cfeda28e66 2026-03-09T00:21:53.721 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (31s) 29s ago 24m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e 1c9761d360d7 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-09T00:21:53.957 
INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:21:53.957 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [], 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "", 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image", 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": false 2026-03-09T00:21:54.159 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[94619]: from='client.44559 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[94619]: from='client.54571 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[94619]: from='client.54577 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/777958587' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[96438]: from='client.44559 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[96438]: from='client.54571 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[96438]: from='client.54577 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.326 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:54 vm04.local ceph-mon[96438]: from='client.? 
192.168.123.104:0/777958587' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:54 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:21:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:54 vm10.local ceph-mon[82076]: from='client.44559 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:54 vm10.local ceph-mon[82076]: from='client.54571 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:54 vm10.local ceph-mon[82076]: from='client.54577 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:54.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:54 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/777958587' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:54.452 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_OK 2026-03-09T00:21:55.278 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:21:55] "GET /metrics HTTP/1.1" 200 38005 "" "Prometheus/2.51.0" 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='client.44580 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mgr 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='client.? 
192.168.123.104:0/4186270214' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mon 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all crash 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all osd 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all mds 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all rgw 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: 
from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Upgrade: Updating iscsi.foo.vm04.fbyciv 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[96438]: Deploying daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='client.44580 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mgr 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/4186270214' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mon 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all crash 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all osd 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all mds 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all rgw 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:21:55.603 
INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Upgrade: Updating iscsi.foo.vm04.fbyciv 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:55.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:55 vm04.local ceph-mon[94619]: Deploying daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='client.44580 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Target is version 19.2.3-678-ge911bdeb (squid) 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 
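At this point the task is sitting in its status-polling loop (the "while ceph orch upgrade status | jq ..." command launched above) while mgr.y sets container_image for each daemon type and starts redeploying iscsi.foo.vm04.fbyciv. A minimal standalone sketch of that polling pattern, assuming a cephadm shell with jq on the PATH; it uses the same .in_progress and .message fields shown in the upgrade status JSON earlier, and the 30-second interval matches the task's own sleep:

    # Poll the orchestrator until the upgrade completes or reports an error.
    while true; do
        status=$(ceph orch upgrade status)
        echo "$status" | jq '.in_progress' | grep -q true || break   # no longer in progress: done
        echo "$status" | jq -r '.message' | grep -q Error && break   # orchestrator reported an error
        ceph orch ps
        ceph versions
        sleep 30
    done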
2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mgr 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/4186270214' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mon 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all crash 2026-03-09T00:21:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all osd 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all mds 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all rgw 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' 
entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all cephfs-mirror 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Upgrade: Updating iscsi.foo.vm04.fbyciv 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm04.fbyciv", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:21:55.829 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:55 vm10.local ceph-mon[82076]: Deploying daemon iscsi.foo.vm04.fbyciv on vm04 2026-03-09T00:21:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:57.029Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:21:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:21:57.030Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:21:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:57 vm10.local 
ceph-mon[82076]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:57 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:57 vm04.local ceph-mon[96438]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:57 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:57 vm04.local ceph-mon[94619]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:21:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:57 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:21:59.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:21:59 vm10.local ceph-mon[82076]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:59.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:21:59 vm04.local ceph-mon[96438]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:21:59.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:21:59 vm04.local ceph-mon[94619]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:01.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:01 vm04.local ceph-mon[96438]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:01.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:01 vm04.local ceph-mon[94619]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:01.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:01 vm10.local ceph-mon[82076]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:03.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:03 vm10.local ceph-mon[82076]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:03.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:03 vm04.local ceph-mon[96438]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:03.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:03 vm04.local ceph-mon[94619]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:04.735 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:04 vm10.local ceph-mon[82076]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:04.751 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:04 
vm04.local ceph-mon[94619]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:04.752 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:04 vm04.local ceph-mon[96438]: from='client.15237 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:05.281 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:05 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:22:05] "GET /metrics HTTP/1.1" 200 38010 "" "Prometheus/2.51.0" 2026-03-09T00:22:05.603 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:05 vm04.local ceph-mon[96438]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:05.603 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:05 vm04.local ceph-mon[94619]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:05.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:05 vm10.local ceph-mon[82076]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:05.893 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:05 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: 2026-03-09T00:22:05.698+0000 7fe14d0a5640 -1 log_channel(cephadm) log [ERR] : Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. 2026-03-09T00:22:06.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:06 vm10.local ceph-mon[82076]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. 2026-03-09T00:22:06.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:06.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:22:06.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:22:06.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:22:06.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[96438]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. 
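The mgr log line above is where this staggered run goes sideways: cephadm could not redeploy iscsi.foo.vm04.fbyciv on the target image, so it pauses the upgrade and raises the UPGRADE_REDEPLOY_DAEMON health check; the polling loop will shortly see Error in the status message and exit. For reference, a hedged triage sketch using standard cephadm CLI (these commands are not part of this job's task list) for inspecting and resuming an upgrade paused this way:

    ceph health detail            # shows the UPGRADE_REDEPLOY_DAEMON warning and the failed daemon
    ceph orch upgrade status      # is_paused should be true; message carries the error text
    ceph orch ps vm04             # state of the daemons on the host where the redeploy failed
    ceph log last cephadm         # recent cephadm events from the active mgr
    ceph orch upgrade resume      # continue the upgrade once the underlying failure is addressed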
2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[94619]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:22:06.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:07.030Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:07.031Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:07.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:07 vm10.local ceph-mon[82076]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:22:07.828 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:07 vm10.local ceph-mon[82076]: Health check failed: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-09T00:22:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:07 vm04.local ceph-mon[96438]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:22:07.851 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:07 vm04.local ceph-mon[96438]: Health check failed: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-09T00:22:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:07 vm04.local ceph-mon[94619]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1.3 KiB/s rd, 1 op/s 2026-03-09T00:22:07.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:07 vm04.local ceph-mon[94619]: Health check failed: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-09T00:22:09.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:09 vm10.local ceph-mon[82076]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:09.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:09 vm04.local ceph-mon[96438]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:09.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:09 vm04.local ceph-mon[94619]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:12.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:11 vm10.local ceph-mon[82076]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:12.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:11 vm04.local ceph-mon[96438]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:12.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:11 vm04.local ceph-mon[94619]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:12 vm10.local ceph-mon[82076]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:12 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:13.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:12 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:12 vm04.local ceph-mon[96438]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:13.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": 
"osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:13.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:12 vm04.local ceph-mon[94619]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 859 B/s rd, 0 op/s 2026-03-09T00:22:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:22:13.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:15.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:14 vm10.local ceph-mon[82076]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:22:15.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:15 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:22:15] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-09T00:22:15.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:14 vm04.local ceph-mon[96438]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:22:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:14 vm04.local ceph-mon[94619]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:22:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:16 vm10.local ceph-mon[82076]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:22:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:16 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/352301859' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:22:17.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:16 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/2747351661' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1485512100"}]: dispatch 2026-03-09T00:22:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:17.031Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:17.032Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:17.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:16 vm04.local ceph-mon[96438]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:22:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:16 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/352301859' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:22:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:16 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2747351661' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1485512100"}]: dispatch 2026-03-09T00:22:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:16 vm04.local ceph-mon[94619]: pgmap v210: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 429 B/s rd, 0 op/s 2026-03-09T00:22:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:16 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/352301859' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T00:22:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:16 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/2747351661' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1485512100"}]: dispatch 2026-03-09T00:22:18.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:17 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/2747351661' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1485512100"}]': finished 2026-03-09T00:22:18.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:17 vm10.local ceph-mon[82076]: osdmap e138: 8 total, 8 up, 8 in 2026-03-09T00:22:18.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:17 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/988302133' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1485512100"}]: dispatch 2026-03-09T00:22:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-mon[96438]: from='client.? 
192.168.123.104:0/2747351661' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1485512100"}]': finished 2026-03-09T00:22:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-mon[96438]: osdmap e138: 8 total, 8 up, 8 in 2026-03-09T00:22:18.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/988302133' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1485512100"}]: dispatch 2026-03-09T00:22:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/2747351661' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6801/1485512100"}]': finished 2026-03-09T00:22:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-mon[94619]: osdmap e138: 8 total, 8 up, 8 in 2026-03-09T00:22:18.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:17 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/988302133' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1485512100"}]: dispatch 2026-03-09T00:22:19.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:18 vm10.local ceph-mon[82076]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:22:19.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:18 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/988302133' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1485512100"}]': finished 2026-03-09T00:22:19.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:18 vm10.local ceph-mon[82076]: osdmap e139: 8 total, 8 up, 8 in 2026-03-09T00:22:19.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:18 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/2633000012' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4147796894"}]: dispatch 2026-03-09T00:22:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[96438]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:22:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/988302133' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1485512100"}]': finished 2026-03-09T00:22:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[96438]: osdmap e139: 8 total, 8 up, 8 in 2026-03-09T00:22:19.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2633000012' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4147796894"}]: dispatch 2026-03-09T00:22:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[94619]: pgmap v212: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:22:19.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/988302133' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:6800/1485512100"}]': finished 2026-03-09T00:22:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[94619]: osdmap e139: 8 total, 8 up, 8 in 2026-03-09T00:22:19.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:18 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/2633000012' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4147796894"}]: dispatch 2026-03-09T00:22:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:19 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/2633000012' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4147796894"}]': finished 2026-03-09T00:22:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:19 vm10.local ceph-mon[82076]: osdmap e140: 8 total, 8 up, 8 in 2026-03-09T00:22:20.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:19 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/1012578613' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3703234089"}]: dispatch 2026-03-09T00:22:20.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:19 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2633000012' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4147796894"}]': finished 2026-03-09T00:22:20.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:19 vm04.local ceph-mon[96438]: osdmap e140: 8 total, 8 up, 8 in 2026-03-09T00:22:20.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:19 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/1012578613' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3703234089"}]: dispatch 2026-03-09T00:22:20.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:19 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/2633000012' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4147796894"}]': finished 2026-03-09T00:22:20.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:19 vm04.local ceph-mon[94619]: osdmap e140: 8 total, 8 up, 8 in 2026-03-09T00:22:20.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:19 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/1012578613' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3703234089"}]: dispatch 2026-03-09T00:22:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:20 vm10.local ceph-mon[82076]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:22:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:20 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/1012578613' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3703234089"}]': finished 2026-03-09T00:22:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:20 vm10.local ceph-mon[82076]: osdmap e141: 8 total, 8 up, 8 in 2026-03-09T00:22:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:20 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/3194921977' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]: dispatch 2026-03-09T00:22:21.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:20 vm10.local ceph-mon[82076]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]: dispatch 2026-03-09T00:22:21.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[96438]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/1012578613' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3703234089"}]': finished 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[96438]: osdmap e141: 8 total, 8 up, 8 in 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/3194921977' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]: dispatch 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[96438]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]: dispatch 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[94619]: pgmap v215: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/1012578613' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/3703234089"}]': finished 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[94619]: osdmap e141: 8 total, 8 up, 8 in 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/3194921977' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]: dispatch 2026-03-09T00:22:21.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:20 vm04.local ceph-mon[94619]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]: dispatch 2026-03-09T00:22:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:21 vm10.local ceph-mon[82076]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]': finished 2026-03-09T00:22:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:21 vm10.local ceph-mon[82076]: osdmap e142: 8 total, 8 up, 8 in 2026-03-09T00:22:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:21 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/2596906534' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]: dispatch 2026-03-09T00:22:22.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:21 vm10.local ceph-mon[82076]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]: dispatch 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[96438]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]': finished 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[96438]: osdmap e142: 8 total, 8 up, 8 in 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/2596906534' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]: dispatch 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[96438]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]: dispatch 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[94619]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/2765919715"}]': finished 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[94619]: osdmap e142: 8 total, 8 up, 8 in 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/2596906534' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]: dispatch 2026-03-09T00:22:22.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:21 vm04.local ceph-mon[94619]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]: dispatch 2026-03-09T00:22:23.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:22 vm10.local ceph-mon[82076]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T00:22:23.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:22 vm10.local ceph-mon[82076]: from='client.? 
' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]': finished 2026-03-09T00:22:23.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:22 vm10.local ceph-mon[82076]: osdmap e143: 8 total, 8 up, 8 in 2026-03-09T00:22:23.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:22 vm04.local ceph-mon[96438]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T00:22:23.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:22 vm04.local ceph-mon[96438]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]': finished 2026-03-09T00:22:23.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:22 vm04.local ceph-mon[96438]: osdmap e143: 8 total, 8 up, 8 in 2026-03-09T00:22:23.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:22 vm04.local ceph-mon[94619]: pgmap v218: 161 pgs: 161 active+clean; 457 KiB data, 291 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T00:22:23.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:22 vm04.local ceph-mon[94619]: from='client.? ' entity='client.iscsi.foo.vm04.fbyciv' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.104:0/4057953826"}]': finished 2026-03-09T00:22:23.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:22 vm04.local ceph-mon[94619]: osdmap e143: 8 total, 8 up, 8 in 2026-03-09T00:22:24.685 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:22:24.890 INFO:teuthology.orchestra.run.vm04.stdout:"Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed." 2026-03-09T00:22:24.956 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:22:25.192 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:24 vm04.local ceph-mon[94619]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 833 B/s rd, 1 op/s 2026-03-09T00:22:25.192 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:24 vm04.local ceph-mon[94619]: from='client.34634 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:25.192 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:25 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:22:25] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-09T00:22:25.192 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:24 vm04.local ceph-mon[96438]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 833 B/s rd, 1 op/s 2026-03-09T00:22:25.192 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:24 vm04.local ceph-mon[96438]: from='client.34634 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:25.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:24 vm10.local ceph-mon[82076]: pgmap v220: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 833 B/s rd, 1 op/s 2026-03-09T00:22:25.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:24 vm10.local 
ceph-mon[82076]: from='client.34634 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (19m) 61s ago 25m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (6m) 61s ago 25m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (6m) 61s ago 25m 52.4M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (6m) 61s ago 27m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (16m) 61s ago 28m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (5m) 61s ago 28m 60.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (6m) 61s ago 27m 50.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (5m) 61s ago 27m 48.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (19m) 61s ago 26m 9.79M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (19m) 61s ago 25m 10.3M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (5m) 61s ago 27m 77.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (4m) 61s ago 27m 56.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (3m) 61s ago 27m 50.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (3m) 61s ago 27m 74.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (2m) 61s ago 26m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (2m) 61s ago 26m 50.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (2m) 61s ago 26m 50.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (105s) 61s ago 26m 70.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (6m) 61s ago 25m 52.6M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (64s) 61s ago 25m 99.2M - 19.2.3-678-ge911bdeb 654f31e6858e 55cfeda28e66 
2026-03-09T00:22:25.491 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (62s) 61s ago 25m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e 1c9761d360d7 2026-03-09T00:22:25.560 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T00:22:26.092 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:22:26.092 INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:22:26.092 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:22:26.092 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:22:26.092 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:22:26.093 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:22:26.093 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:25 vm04.local ceph-mon[94619]: from='client.44652 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:26.093 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:25 vm04.local ceph-mon[94619]: from='client.44658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:26.093 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:25 vm04.local ceph-mon[96438]: from='client.44652 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:26.093 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:25 vm04.local ceph-mon[96438]: from='client.44658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:26.159 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'echo "wait for servicemap items w/ changing names to refresh"' 2026-03-09T00:22:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:25 vm10.local ceph-mon[82076]: 
from='client.44652 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:26.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:25 vm10.local ceph-mon[82076]: from='client.44658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:22:26.454 INFO:teuthology.orchestra.run.vm04.stdout:wait for servicemap items w/ changing names to refresh 2026-03-09T00:22:26.488 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 60' 2026-03-09T00:22:27.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:26 vm10.local ceph-mon[82076]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T00:22:27.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:26 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/983414803' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:22:27.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:26 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:27.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:26 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:27.032Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:27.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:27.033Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[94619]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T00:22:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/983414803' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:22:27.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[96438]: pgmap v221: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T00:22:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/983414803' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:22:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:26 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:29.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:29 vm10.local ceph-mon[82076]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:22:29.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:29 vm04.local ceph-mon[96438]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:22:29.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:29 vm04.local ceph-mon[94619]: pgmap v222: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T00:22:31.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:31 vm10.local ceph-mon[82076]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 575 B/s rd, 0 op/s 2026-03-09T00:22:31.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:31 vm04.local ceph-mon[96438]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 575 B/s rd, 0 op/s 2026-03-09T00:22:31.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:31 vm04.local ceph-mon[94619]: pgmap v223: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 575 B/s rd, 0 op/s 2026-03-09T00:22:33.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:33 vm10.local ceph-mon[82076]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:22:33.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:33 vm04.local ceph-mon[96438]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:22:33.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:33 vm04.local ceph-mon[94619]: pgmap v224: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T00:22:35.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:35 vm10.local ceph-mon[82076]: pgmap v225: 161 pgs: 161 
active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 861 B/s rd, 0 op/s 2026-03-09T00:22:35.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:35 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:22:35] "GET /metrics HTTP/1.1" 200 38086 "" "Prometheus/2.51.0" 2026-03-09T00:22:35.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:35 vm04.local ceph-mon[96438]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 861 B/s rd, 0 op/s 2026-03-09T00:22:35.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:35 vm04.local ceph-mon[94619]: pgmap v225: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 861 B/s rd, 0 op/s 2026-03-09T00:22:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:37.034Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:37.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:37.034Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:37.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:37 vm04.local ceph-mon[96438]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:37.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:37 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:37.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:37 vm04.local ceph-mon[94619]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:37.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:37 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:37 vm10.local ceph-mon[82076]: pgmap v226: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:37 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:39.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:39 vm10.local ceph-mon[82076]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:39.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:39 vm04.local ceph-mon[96438]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB 
avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:39.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:39 vm04.local ceph-mon[94619]: pgmap v227: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:41.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:41 vm10.local ceph-mon[82076]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:41.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:41 vm04.local ceph-mon[96438]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:41.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:41 vm04.local ceph-mon[94619]: pgmap v228: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:42.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:42 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:42.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:42 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:42.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:42 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:43.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:43 vm10.local ceph-mon[82076]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:43.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:43 vm04.local ceph-mon[96438]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:43.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:43 vm04.local ceph-mon[94619]: pgmap v229: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:45.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:45 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:22:45] "GET /metrics HTTP/1.1" 200 38087 "" "Prometheus/2.51.0" 2026-03-09T00:22:45.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:45 vm10.local ceph-mon[82076]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:45.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:45 vm04.local ceph-mon[96438]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:45.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:45 vm04.local ceph-mon[94619]: pgmap v230: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:47.034Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:47.035Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:47.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:47 vm10.local ceph-mon[82076]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:47.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:47 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:47.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:47 vm04.local ceph-mon[96438]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:47.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:47 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:47.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:47 vm04.local ceph-mon[94619]: pgmap v231: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:47.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:47 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:49.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:49 vm10.local ceph-mon[82076]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:49.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:49 vm04.local ceph-mon[96438]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:49.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:49 vm04.local ceph-mon[94619]: pgmap v232: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:51.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:51 vm10.local ceph-mon[82076]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:51.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:51 vm04.local ceph-mon[96438]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:51.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:51 vm04.local ceph-mon[94619]: pgmap v233: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:53.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:53 vm10.local ceph-mon[82076]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:53.850 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:53 vm04.local ceph-mon[96438]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:53.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:53 vm04.local ceph-mon[94619]: pgmap v234: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:55.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:22:55 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:22:55] "GET /metrics HTTP/1.1" 200 38087 "" "Prometheus/2.51.0" 2026-03-09T00:22:55.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:55 vm10.local ceph-mon[82076]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:55.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:55 vm04.local ceph-mon[96438]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:55.850 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:55 vm04.local ceph-mon[94619]: pgmap v235: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:22:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:57.035Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:22:57.035Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:22:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:57 vm10.local ceph-mon[82076]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:57 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:57.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:57 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-mon[96438]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:57.850 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:57 
vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:22:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-mon[94619]: pgmap v236: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:22:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:22:57.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:57 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:00.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:22:59 vm10.local ceph-mon[82076]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:00.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:22:59 vm04.local ceph-mon[96438]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:00.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:22:59 vm04.local ceph-mon[94619]: pgmap v237: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:02.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:01 vm10.local ceph-mon[82076]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:02.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:01 vm04.local ceph-mon[96438]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:02.100 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:01 vm04.local ceph-mon[94619]: pgmap v238: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:04.078 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:03 vm10.local ceph-mon[82076]: pgmap v239: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:04.100 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:03 vm04.local ceph-mon[96438]: pgmap v239: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:04.101 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:03 vm04.local ceph-mon[94619]: pgmap v239: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:05.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:05 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:23:05] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-09T00:23:06.072 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:05 vm04.local ceph-mon[96438]: pgmap v240: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:06.073 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:05 vm04.local ceph-mon[94619]: pgmap v240: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:06.078 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:05 vm10.local ceph-mon[82076]: pgmap v240: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:07.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:06 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:23:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:07.035Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:07.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:07.036Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 
cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T00:23:07.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:06 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' 2026-03-09T00:23:08.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:07 vm10.local ceph-mon[82076]: pgmap v241: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:08.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:07 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:08.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:07 vm04.local ceph-mon[94619]: pgmap v241: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:08.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:07 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:08.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:07 vm04.local ceph-mon[96438]: pgmap v241: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:08.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:07 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:09.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:08 vm10.local ceph-mon[82076]: pgmap v242: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:09.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:08 vm04.local ceph-mon[94619]: pgmap v242: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:09.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:08 vm04.local ceph-mon[96438]: pgmap v242: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:11.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:11 vm04.local ceph-mon[94619]: pgmap v243: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:11.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:11 vm04.local ceph-mon[96438]: pgmap v243: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:11.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:11 vm10.local ceph-mon[82076]: pgmap v243: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:12.328 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:12 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:12.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:12 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:12.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:12 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd 
blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:13.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:13 vm10.local ceph-mon[82076]: pgmap v244: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:13.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:13 vm04.local ceph-mon[94619]: pgmap v244: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:13.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:13 vm04.local ceph-mon[96438]: pgmap v244: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:15.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:15 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:23:15] "GET /metrics HTTP/1.1" 200 38086 "" "Prometheus/2.51.0" 2026-03-09T00:23:15.350 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:15 vm04.local ceph-mon[94619]: pgmap v245: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:15.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:15 vm04.local ceph-mon[96438]: pgmap v245: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:15.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:15 vm10.local ceph-mon[82076]: pgmap v245: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:17.036Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:17.037Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:17 vm04.local ceph-mon[96438]: pgmap v246: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:17.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:17 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:17 vm04.local ceph-mon[94619]: pgmap v246: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:17.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:17 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:17.578 
INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:17 vm10.local ceph-mon[82076]: pgmap v246: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:17.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:17 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:19.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:19 vm10.local ceph-mon[82076]: pgmap v247: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:19.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:19 vm04.local ceph-mon[96438]: pgmap v247: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:19.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:19 vm04.local ceph-mon[94619]: pgmap v247: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:21.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:21 vm10.local ceph-mon[82076]: pgmap v248: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:21.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:21 vm04.local ceph-mon[96438]: pgmap v248: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:21.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:21 vm04.local ceph-mon[94619]: pgmap v248: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:23.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:23 vm10.local ceph-mon[82076]: pgmap v249: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:23.600 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:23 vm04.local ceph-mon[96438]: pgmap v249: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:23.600 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:23 vm04.local ceph-mon[94619]: pgmap v249: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:25.350 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:25 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:23:25] "GET /metrics HTTP/1.1" 200 38086 "" "Prometheus/2.51.0" 2026-03-09T00:23:25.350 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:25 vm04.local ceph-mon[96438]: pgmap v250: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:25.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:25 vm04.local ceph-mon[94619]: pgmap v250: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:25.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:25 vm10.local ceph-mon[82076]: pgmap v250: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:26.835 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e 
sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T00:23:27.047 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:27.037Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:27.047 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:27.037Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-mon[94619]: pgmap v251: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:27.351 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-mon[94619]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-mon[96438]: pgmap v251: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:27.351 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:27 vm04.local ceph-mon[96438]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:alertmanager.a vm04 *:9093,9094 running (20m) 2m ago 26m 28.2M - 0.25.0 c8568f914cd2 bcac0140b0f6 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:grafana.a vm10 *:3000 running (7m) 2m ago 26m 90.1M - 10.4.0 c8b91775d855 aa7f793dcb8e 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:iscsi.foo.vm04.fbyciv vm04 running (7m) 2m ago 26m 52.4M - 3.5 e1d6a67b021e cdb4168e72eb 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:mgr.x vm10 *:8443,9283,8765 running (7m) 2m ago 28m 490M - 19.2.3-678-ge911bdeb 654f31e6858e c971ca6e9652 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:mgr.y vm04 *:8443,9283,8765 running (17m) 2m ago 29m 563M - 19.2.3-678-ge911bdeb 654f31e6858e 72a51572b51b 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:mon.a vm04 running (6m) 2m ago 29m 60.6M 2048M 19.2.3-678-ge911bdeb 
654f31e6858e 3a1ecb9ee7d1 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:mon.b vm10 running (7m) 2m ago 28m 50.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e b102ade927df 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:mon.c vm04 running (6m) 2m ago 28m 48.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e d5119f6d2345 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.a vm04 *:9100 running (20m) 2m ago 27m 9.79M - 1.7.0 72c9c2088986 38e0af6b2fbf 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:node-exporter.b vm10 *:9100 running (20m) 2m ago 27m 10.3M - 1.7.0 72c9c2088986 d059c0022310 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:osd.0 vm04 running (6m) 2m ago 28m 77.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a5eb77bcb38b 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:osd.1 vm04 running (5m) 2m ago 28m 56.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 522cf40e592d 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:osd.2 vm04 running (4m) 2m ago 28m 50.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 69a18f90367f 2026-03-09T00:23:27.362 INFO:teuthology.orchestra.run.vm04.stdout:osd.3 vm04 running (4m) 2m ago 28m 74.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e e8bef19a96a6 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:osd.4 vm10 running (3m) 2m ago 27m 75.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 5cd5f044c189 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:osd.5 vm10 running (3m) 2m ago 27m 50.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 50d8ee7c8cb6 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:osd.6 vm10 running (3m) 2m ago 27m 50.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e d7d72f87911e 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:osd.7 vm10 running (2m) 2m ago 27m 70.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 09c94bcb8e3f 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:prometheus.a vm10 *:9095 running (8m) 2m ago 26m 52.6M - 2.51.0 1d3b7f56885b 1f53121cfa7f 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm04.ehrfsf vm04 *:8000 running (2m) 2m ago 26m 99.2M - 19.2.3-678-ge911bdeb 654f31e6858e 55cfeda28e66 2026-03-09T00:23:27.363 INFO:teuthology.orchestra.run.vm04.stdout:rgw.foo.vm10.dwizvi vm10 *:8000 running (2m) 2m ago 26m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e 1c9761d360d7 2026-03-09T00:23:27.428 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T00:23:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:27 vm10.local ceph-mon[82076]: pgmap v251: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:27 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:27.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:27 vm10.local ceph-mon[82076]: from='mgr.25252 192.168.123.104:0/3276845669' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:23:27.967 
INFO:teuthology.orchestra.run.vm04.stdout: "mon": { 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "mgr": { 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "osd": { 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "rgw": { 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: }, 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "overall": { 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 15 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout: } 2026-03-09T00:23:27.967 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:23:28.014 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T00:23:28.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:28 vm10.local ceph-mon[82076]: from='client.54664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:28.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:28 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/1369283104' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:28.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:28 vm04.local ceph-mon[96438]: from='client.54664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:28.602 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:28 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/1369283104' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:28.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:28 vm04.local ceph-mon[94619]: from='client.54664 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:28.602 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:28 vm04.local ceph-mon[94619]: from='client.? 
192.168.123.104:0/1369283104' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc", 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "in_progress": true, 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "services_complete": [ 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "mon", 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "osd", 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "rgw", 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: "mgr" 2026-03-09T00:23:28.626 INFO:teuthology.orchestra.run.vm04.stdout: ], 2026-03-09T00:23:28.627 INFO:teuthology.orchestra.run.vm04.stdout: "progress": "15/21 daemons upgraded", 2026-03-09T00:23:28.627 INFO:teuthology.orchestra.run.vm04.stdout: "message": "Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed.", 2026-03-09T00:23:28.627 INFO:teuthology.orchestra.run.vm04.stdout: "is_paused": true 2026-03-09T00:23:28.627 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:23:28.675 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:HEALTH_WARN Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm04.fbyciv on host vm04 failed. 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: Upgrade daemon: iscsi.foo.vm04.fbyciv: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm04.fbyciv ... 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:Creating ceph-iscsi config... 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:Write file: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/tcmu-runner-entrypoint.sh 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:Failed to trim old cgroups /sys/fs/cgroup/system.slice/system-ceph\x2dfdcbddf6\x2d1b49\x2d11f1\x2d80b0\x2d7392062373f9.slice/ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:Non-zero exit code 1 from systemctl start ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv 2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code. 
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:systemctl: stderr See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details.
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout:Traceback (most recent call last):
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: return _run_code(code, main_globals, None,
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: exec(code, run_globals)
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in <module>
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from
2026-03-09T00:23:29.331 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy
2026-03-09T00:23:29.332 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container
2026-03-09T00:23:29.332 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1090, in deploy_daemon
2026-03-09T00:23:29.332 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1237, in deploy_daemon_units
2026-03-09T00:23:29.332 INFO:teuthology.orchestra.run.vm04.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws
2026-03-09T00:23:29.332 INFO:teuthology.orchestra.run.vm04.stdout:RuntimeError: Failed command: systemctl start ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv: Job for ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service failed because the control process exited with error code.
2026-03-09T00:23:29.332 INFO:teuthology.orchestra.run.vm04.stdout:See "systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" and "journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service" for details.
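This is the point where the staggered upgrade pauses: cephadm rewrote the iscsi daemon's config files, but systemctl refused to start the redeployed unit, and the log does not capture that unit's own journal. A minimal sketch of the follow-up one might run on vm04, assuming shell access to the host; the service name, fsid, and data path are copied from the error and "Write file:" lines above, not new information:

    # Ask systemd and the journal why the redeployed iscsi unit failed to start
    # (exactly the two commands the RuntimeError above recommends).
    sudo systemctl status ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service
    sudo journalctl -xeu ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@iscsi.foo.vm04.fbyciv.service
    # The redeploy wrote these files just before the start failed; inspect them
    # and the daemon directory for anything obviously wrong.
    sudo ls -l /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/
    sudo cat /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/iscsi.foo.vm04.fbyciv/iscsi-gateway.cfg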
2026-03-09T00:23:29.403 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 1'"'"'' 2026-03-09T00:23:29.563 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:29 vm04.local ceph-mon[94619]: pgmap v252: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:29.563 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:29 vm04.local ceph-mon[94619]: from='client.44679 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:29.563 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:29 vm04.local ceph-mon[96438]: pgmap v252: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:29.563 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:29 vm04.local ceph-mon[96438]: from='client.44679 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:29 vm10.local ceph-mon[82076]: pgmap v252: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:29.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:29 vm10.local ceph-mon[82076]: from='client.44679 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:29.952 INFO:teuthology.orchestra.run.vm04.stdout:true 2026-03-09T00:23:30.008 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | keys'"'"' | grep $sha1' 2026-03-09T00:23:30.576 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:30 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/1754012543' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:23:30.577 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:30 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/3673020590' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:30.577 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:30 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/1754012543' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:23:30.577 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:30 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/3673020590' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:30.577 INFO:teuthology.orchestra.run.vm04.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T00:23:30.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:30 vm10.local ceph-mon[82076]: from='client.? 192.168.123.104:0/1754012543' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T00:23:30.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:30 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/3673020590' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:30.613 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls | grep '"'"'^osd '"'"'' 2026-03-09T00:23:31.135 INFO:teuthology.orchestra.run.vm04.stdout:osd 8 2m ago - 2026-03-09T00:23:31.192 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T00:23:31.194 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm04.local 2026-03-09T00:23:31.194 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- bash -c 'ceph orch upgrade ls' 2026-03-09T00:23:31.388 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:31 vm04.local ceph-mon[96438]: pgmap v253: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:31.389 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:31 vm04.local ceph-mon[96438]: from='client.? 192.168.123.104:0/1970437901' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:31.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:31 vm04.local ceph-mon[94619]: pgmap v253: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:31.389 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:31 vm04.local ceph-mon[94619]: from='client.? 192.168.123.104:0/1970437901' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:31.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:31 vm10.local ceph-mon[82076]: pgmap v253: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:31.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:31 vm10.local ceph-mon[82076]: from='client.? 
192.168.123.104:0/1970437901' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T00:23:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:32 vm10.local ceph-mon[82076]: from='client.44694 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:32.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:32 vm10.local ceph-mon[82076]: from='client.54694 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:32 vm04.local ceph-mon[96438]: from='client.44694 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:32.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:32 vm04.local ceph-mon[96438]: from='client.54694 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:32 vm04.local ceph-mon[94619]: from='client.44694 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:32.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:32 vm04.local ceph-mon[94619]: from='client.54694 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout:{ 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "image": "quay.io/ceph/ceph", 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "registry": "quay.io", 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "bare_image": "ceph/ceph", 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "versions": [ 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "20.2.0", 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "20.1.1", 2026-03-09T00:23:33.164 INFO:teuthology.orchestra.run.vm04.stdout: "20.1.0", 2026-03-09T00:23:33.165 INFO:teuthology.orchestra.run.vm04.stdout: "19.2.3", 2026-03-09T00:23:33.165 INFO:teuthology.orchestra.run.vm04.stdout: "19.2.2", 2026-03-09T00:23:33.165 INFO:teuthology.orchestra.run.vm04.stdout: "19.2.1", 2026-03-09T00:23:33.165 INFO:teuthology.orchestra.run.vm04.stdout: "19.2.0" 2026-03-09T00:23:33.165 INFO:teuthology.orchestra.run.vm04.stdout: ] 2026-03-09T00:23:33.165 INFO:teuthology.orchestra.run.vm04.stdout:} 2026-03-09T00:23:33.232 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0' 2026-03-09T00:23:33.419 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:33 vm04.local ceph-mon[94619]: pgmap v254: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:33.419 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:33 vm04.local ceph-mon[96438]: pgmap v254: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:33.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:33 vm10.local ceph-mon[82076]: pgmap v254: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:34.601 
INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:34 vm04.local ceph-mon[96438]: from='client.54697 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:34.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:34 vm04.local ceph-mon[94619]: from='client.54697 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:34.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:34 vm10.local ceph-mon[82076]: from='client.54697 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:35.201 INFO:teuthology.orchestra.run.vm04.stdout: "16.2.0", 2026-03-09T00:23:35.240 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2' 2026-03-09T00:23:35.342 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:35 vm04.local ceph-mon[94619]: pgmap v255: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:35.342 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:35 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: ::ffff:192.168.123.110 - - [09/Mar/2026:00:23:35] "GET /metrics HTTP/1.1" 200 38085 "" "Prometheus/2.51.0" 2026-03-09T00:23:35.598 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:35 vm04.local ceph-mon[96438]: pgmap v255: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:35.729 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:35 vm10.local ceph-mon[82076]: pgmap v255: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T00:23:36.601 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:36 vm04.local ceph-mon[94619]: from='client.54703 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:36.601 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:36 vm04.local ceph-mon[96438]: from='client.54703 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:36.828 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:36 vm10.local ceph-mon[82076]: from='client.54703 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T00:23:37.158 INFO:teuthology.orchestra.run.vm04.stdout: "v16.2.2", 2026-03-09T00:23:37.158 INFO:teuthology.orchestra.run.vm04.stdout: "v16.2.2-20210505", 2026-03-09T00:23:37.235 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-09T00:23:37.237 INFO:tasks.cephadm:Teardown begin 2026-03-09T00:23:37.237 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:37.272 DEBUG:teuthology.orchestra.run.vm10:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:23:37.303 INFO:tasks.cephadm:Disabling cephadm mgr 
module 2026-03-09T00:23:37.303 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 -- ceph mgr module disable cephadm 2026-03-09T00:23:37.346 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:37.038Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:37.346 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:37.038Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:37 vm10.local ceph-mon[82076]: pgmap v256: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:37.578 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:37 vm10.local ceph-mon[82076]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:37.597 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-mon[96438]: pgmap v256: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:37.597 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-mon[96438]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:37.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-mon[94619]: pgmap v256: 161 pgs: 161 active+clean; 457 KiB data, 292 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T00:23:37.597 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-mon[94619]: from='client.44598 -' entity='client.iscsi.foo.vm04.fbyciv' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T00:23:37.652 INFO:teuthology.orchestra.run.vm04.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory 2026-03-09T00:23:37.673 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-09T00:23:37.673 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-09T00:23:37.673 DEBUG:teuthology.orchestra.run.vm04:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T00:23:37.733 DEBUG:teuthology.orchestra.run.vm10:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T00:23:37.750 INFO:tasks.cephadm:Stopping all daemons... 2026-03-09T00:23:37.750 INFO:tasks.cephadm.mon.a:Stopping mon.a... 
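The exit code 125 from the module-disable step above looks like a side effect of teardown ordering rather than a new failure: the two `sudo rm -f` calls a few entries earlier already deleted /etc/ceph/ceph.conf and /etc/ceph/ceph.client.admin.keyring, and the cephadm shell bind-mounts both, so the container engine reports the statfs error before `ceph mgr module disable cephadm` ever runs. A quick check on vm04 to confirm that reading, as a sketch:

    # cephadm shell passes these paths with -c/-k; teardown removed them just beforehand,
    # which is what produces "statfs ...: no such file or directory" and exit code 125.
    ls -l /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring   # expected: No such file or directory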
2026-03-09T00:23:37.750 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a 2026-03-09T00:23:37.851 INFO:journalctl@ceph.mon.a.vm04.stdout:Mar 09 00:23:37 vm04.local systemd[1]: Stopping Ceph mon.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:23:38.106 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.a.service' 2026-03-09T00:23:38.150 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:38.150 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-09T00:23:38.150 INFO:tasks.cephadm.mon.b:Stopping mon.c... 2026-03-09T00:23:38.150 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c 2026-03-09T00:23:38.216 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:37 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:37] ENGINE Bus STOPPING 2026-03-09T00:23:38.216 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:23:38.216 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STOPPED 2026-03-09T00:23:38.216 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STARTING 2026-03-09T00:23:38.216 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Serving on http://:::9283 2026-03-09T00:23:38.216 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STARTED 2026-03-09T00:23:38.448 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c.service' 2026-03-09T00:23:38.511 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STOPPING 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local systemd[1]: Stopping Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c[96434]: 2026-03-09T00:23:38.298+0000 7f0f7312a640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c[96434]: 2026-03-09T00:23:38.298+0000 7f0f7312a640 -1 mon.c@1(peon) e4 *** Got Signal Terminated *** 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local podman[129658]: 2026-03-09 00:23:38.354042892 +0000 UTC m=+0.069892781 container died d5119f6d234550c7eab0f26860cff5b292fa76d9ebbf497c4adf645e16ab99ec (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local podman[129658]: 2026-03-09 00:23:38.374470666 +0000 UTC m=+0.090320546 container remove d5119f6d234550c7eab0f26860cff5b292fa76d9ebbf497c4adf645e16ab99ec (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local bash[129658]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-c 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c.service: Deactivated successfully. 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local systemd[1]: Stopped Ceph mon.c for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:23:38.512 INFO:journalctl@ceph.mon.c.vm04.stdout:Mar 09 00:23:38 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.c.service: Consumed 3.935s CPU time. 
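From here to the end of the excerpt the same teardown pattern repeats for every remaining daemon: stop the fsid-scoped systemd unit, then kill the `journalctl -f` follower teuthology had attached to that unit. As a compact illustration only (teuthology issues the pair once per daemon rather than running a loop):

    fsid=fdcbddf6-1b49-11f1-80b0-7392062373f9
    # one iteration per daemon, mirroring the systemctl/pkill pairs in the log below
    for daemon in mon.b mgr.y mgr.x osd.0 osd.1 osd.2; do
        sudo systemctl stop "ceph-${fsid}@${daemon}"
        sudo pkill -f "journalctl -f -n 0 -u ceph-${fsid}@${daemon}.service"
    done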
2026-03-09T00:23:38.522 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:38.522 INFO:tasks.cephadm.mon.b:Stopped mon.c 2026-03-09T00:23:38.522 INFO:tasks.cephadm.mon.b:Stopping mon.b... 2026-03-09T00:23:38.522 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.b 2026-03-09T00:23:38.799 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:38 vm10.local systemd[1]: Stopping Ceph mon.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:23:38.799 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:38 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b[82072]: 2026-03-09T00:23:38.640+0000 7fcccbd72640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:38.799 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:38 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b[82072]: 2026-03-09T00:23:38.640+0000 7fcccbd72640 -1 mon.b@2(peon) e4 *** Got Signal Terminated *** 2026-03-09T00:23:38.799 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:38 vm10.local podman[110075]: 2026-03-09 00:23:38.693580446 +0000 UTC m=+0.067941905 container died b102ade927dfdc95c6946ae58ddfa512f36fc7615c0075195eb14b3bbd47c54e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:23:38.799 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:38 vm10.local podman[110075]: 2026-03-09 00:23:38.719583297 +0000 UTC m=+0.093944756 container remove b102ade927dfdc95c6946ae58ddfa512f36fc7615c0075195eb14b3bbd47c54e (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default) 2026-03-09T00:23:38.799 INFO:journalctl@ceph.mon.b.vm10.stdout:Mar 09 00:23:38 vm10.local bash[110075]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mon-b 2026-03-09T00:23:38.803 
DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mon.b.service' 2026-03-09T00:23:38.845 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:38.845 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-09T00:23:38.845 INFO:tasks.cephadm.mgr.y:Stopping mgr.y... 2026-03-09T00:23:38.845 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y 2026-03-09T00:23:38.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T00:23:38.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STOPPED 2026-03-09T00:23:38.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STARTING 2026-03-09T00:23:38.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Serving on http://:::9283 2026-03-09T00:23:38.851 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y[81423]: [09/Mar/2026:00:23:38] ENGINE Bus STARTED 2026-03-09T00:23:39.126 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:38 vm04.local systemd[1]: Stopping Ceph mgr.y for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:23:39.127 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:39 vm04.local podman[129776]: 2026-03-09 00:23:39.017244315 +0000 UTC m=+0.070556893 container died 72a51572b51b0c1e660b60b36ea1df31ca97d4916a742af47c3547608b47b1ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_REF=squid) 2026-03-09T00:23:39.127 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:39 vm04.local podman[129776]: 2026-03-09 00:23:39.061567907 +0000 UTC m=+0.114880495 container remove 72a51572b51b0c1e660b60b36ea1df31ca97d4916a742af47c3547608b47b1ef (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-09T00:23:39.127 INFO:journalctl@ceph.mgr.y.vm04.stdout:Mar 09 00:23:39 vm04.local bash[129776]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-mgr-y 2026-03-09T00:23:39.134 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.y.service' 2026-03-09T00:23:39.215 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:39.215 INFO:tasks.cephadm.mgr.y:Stopped mgr.y 2026-03-09T00:23:39.215 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-09T00:23:39.215 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x 2026-03-09T00:23:39.466 DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@mgr.x.service' 2026-03-09T00:23:39.496 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:39.496 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-09T00:23:39.497 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-09T00:23:39.497 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.0 2026-03-09T00:23:39.850 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:39 vm04.local systemd[1]: Stopping Ceph osd.0 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:23:39.850 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:39 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:23:39.598+0000 7ffb1c6a1640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:39.850 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:39 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:23:39.598+0000 7ffb1c6a1640 -1 osd.0 143 *** Got signal Terminated *** 2026-03-09T00:23:39.850 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:39 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0[102001]: 2026-03-09T00:23:39.598+0000 7ffb1c6a1640 -1 osd.0 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local podman[129879]: 2026-03-09 00:23:44.638602007 +0000 UTC m=+5.053256269 container died a5eb77bcb38b57fff2a27f653aba4c12d6ba237a6d15c0a44fae46205a548bfb (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223) 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local 
podman[129879]: 2026-03-09 00:23:44.671761191 +0000 UTC m=+5.086415453 container remove a5eb77bcb38b57fff2a27f653aba4c12d6ba237a6d15c0a44fae46205a548bfb (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local bash[129879]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local podman[129947]: 2026-03-09 00:23:44.826504147 +0000 UTC m=+0.020366883 container create cb141ad394413b8f2c78b4b25f8b311d488a4c920b7da903dfa12feedd0f9bae (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local podman[129947]: 2026-03-09 00:23:44.867528092 +0000 UTC m=+0.061390828 container init cb141ad394413b8f2c78b4b25f8b311d488a4c920b7da903dfa12feedd0f9bae (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local podman[129947]: 2026-03-09 00:23:44.871088485 +0000 UTC m=+0.064951221 container start cb141ad394413b8f2c78b4b25f8b311d488a4c920b7da903dfa12feedd0f9bae (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, 
name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3) 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local podman[129947]: 2026-03-09 00:23:44.873242436 +0000 UTC m=+0.067105172 container attach cb141ad394413b8f2c78b4b25f8b311d488a4c920b7da903dfa12feedd0f9bae (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-0-deactivate, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:23:44.918 INFO:journalctl@ceph.osd.0.vm04.stdout:Mar 09 00:23:44 vm04.local podman[129947]: 2026-03-09 00:23:44.81849705 +0000 UTC m=+0.012359816 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:23:45.042 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.0.service' 2026-03-09T00:23:45.080 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:45.080 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-09T00:23:45.080 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-09T00:23:45.080 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.1 2026-03-09T00:23:45.236 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:45 vm04.local systemd[1]: Stopping Ceph osd.1 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:23:45.600 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:45 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:23:45.235+0000 7f8a2514f640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:45.601 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:45 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:23:45.235+0000 7f8a2514f640 -1 osd.1 143 *** Got signal Terminated *** 2026-03-09T00:23:45.601 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:45 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1[106256]: 2026-03-09T00:23:45.235+0000 7f8a2514f640 -1 osd.1 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:23:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:47.039Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:47.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:47 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:47.040Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:50.542 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130042]: 2026-03-09 00:23:50.259808855 +0000 UTC m=+5.040898398 container died 522cf40e592dc4a5808ef2a62ca73007729f265ac0cf472a5bc36247b3785861 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:23:50.542 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130042]: 2026-03-09 00:23:50.291490604 +0000 UTC m=+5.072580146 container remove 522cf40e592dc4a5808ef2a62ca73007729f265ac0cf472a5bc36247b3785861 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS) 2026-03-09T00:23:50.542 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local bash[130042]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1 2026-03-09T00:23:50.797 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130111]: 2026-03-09 00:23:50.443409984 +0000 UTC m=+0.010206594 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:23:50.797 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130111]: 2026-03-09 00:23:50.589251669 +0000 UTC m=+0.156048279 container create c6c698271ebee830814f84d82070815bbfcc9e2e8101695ef6a90f7ae3b3a769 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:23:50.797 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130111]: 2026-03-09 00:23:50.636678938 +0000 UTC m=+0.203475548 container init c6c698271ebee830814f84d82070815bbfcc9e2e8101695ef6a90f7ae3b3a769 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, ceph=True, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:23:50.797 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130111]: 2026-03-09 00:23:50.640049889 +0000 UTC m=+0.206846499 container start c6c698271ebee830814f84d82070815bbfcc9e2e8101695ef6a90f7ae3b3a769 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, 
FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3) 2026-03-09T00:23:50.797 INFO:journalctl@ceph.osd.1.vm04.stdout:Mar 09 00:23:50 vm04.local podman[130111]: 2026-03-09 00:23:50.641294367 +0000 UTC m=+0.208090977 container attach c6c698271ebee830814f84d82070815bbfcc9e2e8101695ef6a90f7ae3b3a769 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-1-deactivate, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-09T00:23:50.825 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.1.service' 2026-03-09T00:23:50.862 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:50.863 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-09T00:23:50.863 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-09T00:23:50.863 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2 2026-03-09T00:23:51.101 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:50 vm04.local systemd[1]: Stopping Ceph osd.2 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
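Each stopped OSD logs the same two shutdown markers that osd.0 and osd.1 logged above. A small, hypothetical checker that scans saved journal text for those markers per OSD (it only parses text and does not contact the cluster):

import re
from collections import defaultdict

MARKERS = ("Got signal Terminated", "Immediate shutdown (osd_fast_shutdown=true)")

def clean_shutdowns(journal_text: str) -> dict:
    """Map OSD id -> True if both shutdown markers were logged for it."""
    seen = defaultdict(set)
    for m in re.finditer(r"osd\.(\d+) \d+ \*\*\* (.+?) \*\*\*", journal_text):
        seen[int(m.group(1))].add(m.group(2))
    return {osd: all(mk in msgs for mk in MARKERS) for osd, msgs in seen.items()}

# e.g. clean_shutdowns(open("osd.journal").read()) -> {1: True, 2: True}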
2026-03-09T00:23:51.101 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:51 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:23:51.011+0000 7feb2f45f640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:51.101 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:51 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:23:51.011+0000 7feb2f45f640 -1 osd.2 143 *** Got signal Terminated *** 2026-03-09T00:23:51.101 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:51 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2[111922]: 2026-03-09T00:23:51.011+0000 7feb2f45f640 -1 osd.2 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:23:56.315 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local podman[130210]: 2026-03-09 00:23:56.043708039 +0000 UTC m=+5.047050214 container died 69a18f90367fdb6b22d2e8a15c29c25399e3ed450c3f98d6689baec8f824481f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.3) 2026-03-09T00:23:56.315 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local podman[130210]: 2026-03-09 00:23:56.069645076 +0000 UTC m=+5.072987251 container remove 69a18f90367fdb6b22d2e8a15c29c25399e3ed450c3f98d6689baec8f824481f (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:23:56.316 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local bash[130210]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2 2026-03-09T00:23:56.316 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local podman[130278]: 2026-03-09 00:23:56.223728646 +0000 UTC m=+0.019363223 container create c55f32e4a266cb95aff2856823bb7f1a68c6431bb28d774d45c9a2ba4fc0a042 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, ceph=True, 
org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:23:56.316 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local podman[130278]: 2026-03-09 00:23:56.272072843 +0000 UTC m=+0.067707430 container init c55f32e4a266cb95aff2856823bb7f1a68c6431bb28d774d45c9a2ba4fc0a042 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2) 2026-03-09T00:23:56.316 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local podman[130278]: 2026-03-09 00:23:56.275365576 +0000 UTC m=+0.071000153 container start c55f32e4a266cb95aff2856823bb7f1a68c6431bb28d774d45c9a2ba4fc0a042 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-09T00:23:56.316 INFO:journalctl@ceph.osd.2.vm04.stdout:Mar 09 00:23:56 vm04.local podman[130278]: 2026-03-09 00:23:56.276402426 +0000 UTC m=+0.072037003 container attach c55f32e4a266cb95aff2856823bb7f1a68c6431bb28d774d45c9a2ba4fc0a042 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-2-deactivate, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0) 2026-03-09T00:23:56.444 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.2.service' 2026-03-09T00:23:56.483 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:23:56.483 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-09T00:23:56.483 INFO:tasks.cephadm.osd.3:Stopping osd.3... 2026-03-09T00:23:56.483 DEBUG:teuthology.orchestra.run.vm04:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3 2026-03-09T00:23:56.600 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:23:56 vm04.local systemd[1]: Stopping Ceph osd.3 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:23:57.039 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:23:56 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:23:56.627+0000 7fad03dc8640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:23:57.039 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:23:56 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:23:56.627+0000 7fad03dc8640 -1 osd.3 143 *** Got signal Terminated *** 2026-03-09T00:23:57.039 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:23:56 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3[117094]: 2026-03-09T00:23:56.627+0000 7fad03dc8640 -1 osd.3 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:23:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:57.039Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:23:57.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:23:57 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:23:57.040Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130374]: 2026-03-09 00:24:01.65515435 +0000 UTC m=+5.042397294 container died e8bef19a96a6f33d6a4fe24f57b8edb4cf37b521840304036c3c273b88092b87 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, 
io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130374]: 2026-03-09 00:24:01.67957572 +0000 UTC m=+5.066818644 container remove e8bef19a96a6f33d6a4fe24f57b8edb4cf37b521840304036c3c273b88092b87 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local bash[130374]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130439]: 2026-03-09 00:24:01.805362427 +0000 UTC m=+0.014911581 container create 0a0e81818e6d7cae2b1d854f0e9ac849068a29aa1b1447ecbd4f16c56c9dc327 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130439]: 2026-03-09 00:24:01.840232464 +0000 UTC m=+0.049781627 container init 0a0e81818e6d7cae2b1d854f0e9ac849068a29aa1b1447ecbd4f16c56c9dc327 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, 
CEPH_REF=squid, ceph=True) 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130439]: 2026-03-09 00:24:01.843840477 +0000 UTC m=+0.053389631 container start 0a0e81818e6d7cae2b1d854f0e9ac849068a29aa1b1447ecbd4f16c56c9dc327 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130439]: 2026-03-09 00:24:01.844690108 +0000 UTC m=+0.054239262 container attach 0a0e81818e6d7cae2b1d854f0e9ac849068a29aa1b1447ecbd4f16c56c9dc327 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-3-deactivate, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:24:01.976 INFO:journalctl@ceph.osd.3.vm04.stdout:Mar 09 00:24:01 vm04.local podman[130439]: 2026-03-09 00:24:01.799091209 +0000 UTC m=+0.008640374 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:24:02.039 DEBUG:teuthology.orchestra.run.vm04:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.3.service' 2026-03-09T00:24:02.079 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:24:02.079 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-09T00:24:02.079 INFO:tasks.cephadm.osd.4:Stopping osd.4... 2026-03-09T00:24:02.079 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.4 2026-03-09T00:24:02.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:02 vm10.local systemd[1]: Stopping Ceph osd.4 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
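Interleaved with the shutdown markers, podman logs the lifecycle of each daemon container (died, remove) followed by the short-lived *-deactivate container (create, init, start, attach). A hypothetical parser that groups those events by container name, assuming the `image=..., name=...` field order shown in the entries above:

import re
from collections import defaultdict

EVENT = re.compile(
    r"container (died|remove|create|init|start|attach) [0-9a-f]+ "
    r"\(image=[^,]+, name=([^,)]+)"
)

def podman_events(journal_text: str) -> dict:
    """Map container name -> ordered list of podman lifecycle events."""
    events = defaultdict(list)
    for m in EVENT.finditer(journal_text):
        events[m.group(2)].append(m.group(1))
    return dict(events)

# e.g. events["ceph-<fsid>-osd-3"]            -> ["died", "remove"]
#      events["ceph-<fsid>-osd-3-deactivate"] -> ["create", "init", "start", "attach"]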
2026-03-09T00:24:02.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:02 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:24:02.190+0000 7fc6e0871640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:24:02.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:02 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:24:02.190+0000 7fc6e0871640 -1 osd.4 143 *** Got signal Terminated *** 2026-03-09T00:24:02.578 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:02 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:24:02.190+0000 7fc6e0871640 -1 osd.4 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:24:07.078 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4[90632]: 2026-03-09T00:24:07.007+0000 7fc6dc689640 -1 osd.4 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:42.739459+0000 front 2026-03-09T00:23:42.739577+0000 (oldest deadline 2026-03-09T00:24:06.239273+0000) 2026-03-09T00:24:07.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:06 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:06.806+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:24:07.040Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:24:07.351 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:07 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:24:07.042Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:24:07.380 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110283]: 2026-03-09 00:24:07.222661137 +0000 UTC m=+5.049767606 container died 5cd5f044c1899600c6509a5b85aaaff70d8a83c23b89bbef2eca673eb5dad100 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:24:07.380 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110283]: 2026-03-09 00:24:07.255366001 +0000 UTC m=+5.082472480 container remove 5cd5f044c1899600c6509a5b85aaaff70d8a83c23b89bbef2eca673eb5dad100 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:24:07.381 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local bash[110283]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4 2026-03-09T00:24:07.676 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110349]: 2026-03-09 00:24:07.448909892 +0000 UTC m=+0.027664893 container create bdd094fbf114123ee672fe396093c8a1a368f6137ad03b64a76af201a04cb770 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:24:07.676 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110349]: 2026-03-09 00:24:07.502692021 +0000 UTC m=+0.081447022 container init bdd094fbf114123ee672fe396093c8a1a368f6137ad03b64a76af201a04cb770 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team ) 
2026-03-09T00:24:07.676 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110349]: 2026-03-09 00:24:07.506176681 +0000 UTC m=+0.084931672 container start bdd094fbf114123ee672fe396093c8a1a368f6137ad03b64a76af201a04cb770 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3) 2026-03-09T00:24:07.676 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110349]: 2026-03-09 00:24:07.512992497 +0000 UTC m=+0.091747498 container attach bdd094fbf114123ee672fe396093c8a1a368f6137ad03b64a76af201a04cb770 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-4-deactivate, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:24:07.676 INFO:journalctl@ceph.osd.4.vm10.stdout:Mar 09 00:24:07 vm10.local podman[110349]: 2026-03-09 00:24:07.439889921 +0000 UTC m=+0.018644922 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:24:07.718 DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.4.service' 2026-03-09T00:24:07.760 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:24:07.760 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-09T00:24:07.760 INFO:tasks.cephadm.osd.5:Stopping osd.5... 2026-03-09T00:24:07.761 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.5 2026-03-09T00:24:07.998 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:07 vm10.local systemd[1]: Stopping Ceph osd.5 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
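With the vm04 OSDs down, the surviving OSDs on vm10 start emitting the heartbeat_check warnings seen above and below. A hypothetical parser that extracts which reporter/peer pairs have lost contact from saved journal text:

import re

HB = re.compile(
    r"osd\.(?P<reporter>\d+) \d+ heartbeat_check: no reply from "
    r"(?P<addr>\S+) osd\.(?P<peer>\d+)"
)

def unreachable_peers(journal_text: str) -> set:
    """Return {(reporting osd, unreachable peer osd), ...} from journal text."""
    return {(int(m["reporter"]), int(m["peer"])) for m in HB.finditer(journal_text)}

# For the entries around this point the result is roughly
# {(4, 0), (5, 0), (6, 0), (6, 1), (7, 0), (7, 1)}.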
2026-03-09T00:24:07.998 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:07 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:07.919+0000 7f91f2018640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:24:07.998 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:07 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:07.919+0000 7f91f2018640 -1 osd.5 143 *** Got signal Terminated *** 2026-03-09T00:24:07.998 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:07 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:07.919+0000 7f91f2018640 -1 osd.5 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:24:07.998 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:07 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:07.852+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:09.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:08 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:08.791+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:09.078 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:08 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:08.747+0000 7f91ee631640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:42.216862+0000 front 2026-03-09T00:23:42.216825+0000 (oldest deadline 2026-03-09T00:24:08.116422+0000) 2026-03-09T00:24:09.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:08 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:08.857+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:10.078 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:09 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:09.745+0000 7f91ee631640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:42.216862+0000 front 2026-03-09T00:23:42.216825+0000 (oldest deadline 2026-03-09T00:24:08.116422+0000) 2026-03-09T00:24:10.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:09 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:09.772+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:10.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:09 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:09.880+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 
2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:11.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:10 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:10.801+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:11.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:10 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:10.843+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:11.078 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:10 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:10.712+0000 7f91ee631640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:42.216862+0000 front 2026-03-09T00:23:42.216825+0000 (oldest deadline 2026-03-09T00:24:08.116422+0000) 2026-03-09T00:24:12.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:11 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:11.759+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:12.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:11 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:11.759+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:12.078 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:11 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:11.737+0000 7f91ee631640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:42.216862+0000 front 2026-03-09T00:23:42.216825+0000 (oldest deadline 2026-03-09T00:24:08.116422+0000) 2026-03-09T00:24:12.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:11 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:11.890+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:12.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:11 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:11.890+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:12.982 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:12 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:12.748+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest 
deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:12.982 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:12 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:12.748+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:12.982 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:12 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5[94772]: 2026-03-09T00:24:12.716+0000 7f91ee631640 -1 osd.5 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:42.216862+0000 front 2026-03-09T00:23:42.216825+0000 (oldest deadline 2026-03-09T00:24:08.116422+0000) 2026-03-09T00:24:12.982 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:12 vm10.local podman[110454]: 2026-03-09 00:24:12.950173422 +0000 UTC m=+5.045391988 container died 50d8ee7c8cb6e3485ffeb3b57f487affbf0f0691ccaafce019c71914a99c7492 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T00:24:12.982 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:12 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:12.848+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:12.982 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:12 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:12.848+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:12 vm10.local podman[110454]: 2026-03-09 00:24:12.981419977 +0000 UTC m=+5.076638544 container remove 50d8ee7c8cb6e3485ffeb3b57f487affbf0f0691ccaafce019c71914a99c7492 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base 
Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:12 vm10.local bash[110454]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:13 vm10.local podman[110523]: 2026-03-09 00:24:13.135071312 +0000 UTC m=+0.020542965 container create e0f01bf32c045d0d3facfc4590986563f85813f4f09693a30b59060006a7baa4 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid) 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:13 vm10.local podman[110523]: 2026-03-09 00:24:13.179749373 +0000 UTC m=+0.065221026 container init e0f01bf32c045d0d3facfc4590986563f85813f4f09693a30b59060006a7baa4 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2) 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:13 vm10.local podman[110523]: 2026-03-09 00:24:13.183885774 +0000 UTC m=+0.069357427 container start e0f01bf32c045d0d3facfc4590986563f85813f4f09693a30b59060006a7baa4 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, ceph=True, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:13 vm10.local podman[110523]: 2026-03-09 00:24:13.186039615 +0000 UTC 
m=+0.071511268 container attach e0f01bf32c045d0d3facfc4590986563f85813f4f09693a30b59060006a7baa4 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-5-deactivate, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2) 2026-03-09T00:24:13.323 INFO:journalctl@ceph.osd.5.vm10.stdout:Mar 09 00:24:13 vm10.local podman[110523]: 2026-03-09 00:24:13.127102859 +0000 UTC m=+0.012574522 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:24:13.352 DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.5.service' 2026-03-09T00:24:13.383 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:24:13.383 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-09T00:24:13.383 INFO:tasks.cephadm.osd.6:Stopping osd.6... 2026-03-09T00:24:13.383 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6 2026-03-09T00:24:13.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:13 vm10.local systemd[1]: Stopping Ceph osd.6 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
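The alertmanager entries that recur throughout this teardown fail for the same reason each time: host.containers.internal does not resolve via 192.168.123.1, so the dashboard webhook can never be delivered. A minimal, hypothetical pre-flight check for that symptom (run wherever the alertmanager container's resolver points):

import socket

def resolves(name: str, port: int = 8443) -> bool:
    """True if `name` resolves with the configured resolver."""
    try:
        socket.getaddrinfo(name, port)
        return True
    except socket.gaierror:
        return False

if __name__ == "__main__":
    host = "host.containers.internal"  # the name alertmanager keeps failing to look up
    print(f"{host} resolves: {resolves(host)}")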
2026-03-09T00:24:13.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:13.527+0000 7f3030801640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:24:13.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:13.527+0000 7f3030801640 -1 osd.6 143 *** Got signal Terminated *** 2026-03-09T00:24:13.579 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:13.527+0000 7f3030801640 -1 osd.6 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:24:14.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:13.710+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:14.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:13.710+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:14.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:13.865+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:14.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:13 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:13.865+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:15.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:14 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:14.688+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:15.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:14 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:14.688+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:15.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:14 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:14.855+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 
2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:15.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:14 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:14.855+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:16.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:15 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:15.689+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:16.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:15 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:15.689+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:16.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:15 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:15.832+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:16.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:15 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:15.832+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:17.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:16 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:16.676+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:17.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:16 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:16.676+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:17.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:16 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:16.876+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:17.078 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:16 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:16.876+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest 
deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:24:17.041Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:24:17.350 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:17 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:24:17.042Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T00:24:17.927 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:17 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:17.656+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:17.927 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:17 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:17.656+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:17.927 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:17 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:17.656+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:18.328 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:17 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:17.925+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:44.043468+0000 front 2026-03-09T00:23:44.043154+0000 (oldest deadline 2026-03-09T00:24:06.343036+0000) 2026-03-09T00:24:18.328 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:17 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6[98928]: 2026-03-09T00:24:17.925+0000 7f302ce1a640 -1 osd.6 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:46.343597+0000 front 2026-03-09T00:23:46.343397+0000 (oldest deadline 2026-03-09T00:24:11.643253+0000) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:18 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:18.704+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:18 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 
2026-03-09T00:24:18.704+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:18 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:18.704+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local podman[110619]: 2026-03-09 00:24:18.562931454 +0000 UTC m=+5.051247907 container died d7d72f87911e430f122796c773eafe454c62ab1a31a005422e5aa62985e73b9a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local podman[110619]: 2026-03-09 00:24:18.580129089 +0000 UTC m=+5.068445542 container remove d7d72f87911e430f122796c773eafe454c62ab1a31a005422e5aa62985e73b9a (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local bash[110619]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local podman[110685]: 2026-03-09 00:24:18.746245622 +0000 UTC m=+0.021436427 container create 837fe991bcf57cb43c846df367cbf69b5b0519701e1944c95d0bab5f1c40bc1c (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, 
org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local podman[110685]: 2026-03-09 00:24:18.786429091 +0000 UTC m=+0.061619906 container init 837fe991bcf57cb43c846df367cbf69b5b0519701e1944c95d0bab5f1c40bc1c (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local podman[110685]: 2026-03-09 00:24:18.790008179 +0000 UTC m=+0.065198994 container start 837fe991bcf57cb43c846df367cbf69b5b0519701e1944c95d0bab5f1c40bc1c (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, io.buildah.version=1.41.3, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:24:18.829 INFO:journalctl@ceph.osd.6.vm10.stdout:Mar 09 00:24:18 vm10.local podman[110685]: 2026-03-09 00:24:18.794984252 +0000 UTC m=+0.070175077 container attach 837fe991bcf57cb43c846df367cbf69b5b0519701e1944c95d0bab5f1c40bc1c (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-6-deactivate, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, ceph=True) 2026-03-09T00:24:18.967 
DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.6.service' 2026-03-09T00:24:19.001 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:24:19.001 INFO:tasks.cephadm.osd.6:Stopped osd.6 2026-03-09T00:24:19.001 INFO:tasks.cephadm.osd.7:Stopping osd.7... 2026-03-09T00:24:19.002 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.7 2026-03-09T00:24:19.150 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local systemd[1]: Stopping Ceph osd.7 for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:24:19.578 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:19.149+0000 7faf27d17640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T00:24:19.578 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:19.149+0000 7faf27d17640 -1 osd.7 143 *** Got signal Terminated *** 2026-03-09T00:24:19.578 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:19.149+0000 7faf27d17640 -1 osd.7 143 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T00:24:20.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:19.664+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:20.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:19.664+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:20.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:19 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:19.664+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:21.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:20 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:20.666+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:21.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:20 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:20.666+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:21.078 
INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:20 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:20.666+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:22.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:21 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:21.698+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:22.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:21 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:21.698+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:22.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:21 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:21.698+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:23.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:22 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:22.713+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:23.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:22 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:22.713+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:23.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:22 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:22.713+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:23.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:22 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:22.713+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6830 osd.3 since back 2026-03-09T00:23:57.235627+0000 front 2026-03-09T00:23:57.235731+0000 (oldest deadline 2026-03-09T00:24:22.535470+0000) 2026-03-09T00:24:24.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:23 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:23.718+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6806 osd.0 since back 2026-03-09T00:23:43.934169+0000 front 2026-03-09T00:23:43.934128+0000 (oldest deadline 2026-03-09T00:24:08.633918+0000) 2026-03-09T00:24:24.078 
INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:23 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:23.718+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6814 osd.1 since back 2026-03-09T00:23:49.735148+0000 front 2026-03-09T00:23:49.735026+0000 (oldest deadline 2026-03-09T00:24:10.834498+0000) 2026-03-09T00:24:24.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:23 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:23.718+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6822 osd.2 since back 2026-03-09T00:23:51.335612+0000 front 2026-03-09T00:23:51.335674+0000 (oldest deadline 2026-03-09T00:24:17.235157+0000) 2026-03-09T00:24:24.078 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:23 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7[103089]: 2026-03-09T00:24:23.718+0000 7faf23b2f640 -1 osd.7 143 heartbeat_check: no reply from 192.168.123.104:6830 osd.3 since back 2026-03-09T00:23:57.235627+0000 front 2026-03-09T00:23:57.235731+0000 (oldest deadline 2026-03-09T00:24:22.535470+0000) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110782]: 2026-03-09 00:24:24.187163601 +0000 UTC m=+5.051803567 container died 09c94bcb8e3f593c1c5b80112a36801775641a4ac8d7c51dd2c1f5d601627576 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110782]: 2026-03-09 00:24:24.217576356 +0000 UTC m=+5.082216322 container remove 09c94bcb8e3f593c1c5b80112a36801775641a4ac8d7c51dd2c1f5d601627576 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local bash[110782]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110852]: 2026-03-09 00:24:24.368711051 +0000 UTC m=+0.018617674 container create 
cc3cfb01d1fd31b804743143a18cc8233ae4d316f8eb9b66f80080d4c6a91f61 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110852]: 2026-03-09 00:24:24.411002185 +0000 UTC m=+0.060908797 container init cc3cfb01d1fd31b804743143a18cc8233ae4d316f8eb9b66f80080d4c6a91f61 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110852]: 2026-03-09 00:24:24.414605909 +0000 UTC m=+0.064512531 container start cc3cfb01d1fd31b804743143a18cc8233ae4d316f8eb9b66f80080d4c6a91f61 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110852]: 2026-03-09 00:24:24.415886535 +0000 UTC m=+0.065793147 container attach cc3cfb01d1fd31b804743143a18cc8233ae4d316f8eb9b66f80080d4c6a91f61 (image=quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-osd-7-deactivate, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T00:24:24.461 INFO:journalctl@ceph.osd.7.vm10.stdout:Mar 09 00:24:24 vm10.local podman[110852]: 2026-03-09 00:24:24.361177422 +0000 UTC m=+0.011084044 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc 2026-03-09T00:24:24.577 DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@osd.7.service' 2026-03-09T00:24:24.612 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:24:24.612 INFO:tasks.cephadm.osd.7:Stopped osd.7 2026-03-09T00:24:24.612 INFO:tasks.cephadm.prometheus.a:Stopping prometheus.a... 2026-03-09T00:24:24.612 DEBUG:teuthology.orchestra.run.vm10:> sudo systemctl stop ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a 2026-03-09T00:24:24.761 INFO:journalctl@ceph.prometheus.a.vm10.stdout:Mar 09 00:24:24 vm10.local systemd[1]: Stopping Ceph prometheus.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:24:24.855 DEBUG:teuthology.orchestra.run.vm10:> sudo pkill -f 'journalctl -f -n 0 -u ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@prometheus.a.service' 2026-03-09T00:24:24.889 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T00:24:24.889 INFO:tasks.cephadm.prometheus.a:Stopped prometheus.a 2026-03-09T00:24:24.889 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 --force --keep-logs 2026-03-09T00:24:26.225 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: Stopping Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local podman[130856]: 2026-03-09 00:24:26.309993848 +0000 UTC m=+0.026634273 container died 38e0af6b2fbf68d04000e9cd6e5871604de837a347722e4c9ec49bffb96b81a9 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local podman[130856]: 2026-03-09 00:24:26.330150344 +0000 UTC m=+0.046790760 container remove 38e0af6b2fbf68d04000e9cd6e5871604de837a347722e4c9ec49bffb96b81a9 (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local bash[130856]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-a 2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: Stopped Ceph node-exporter.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:24:26.560 INFO:journalctl@ceph.node-exporter.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.a.service: Consumed 1.804s CPU time. 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: Stopping Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a[78063]: ts=2026-03-09T00:24:26.629Z caller=main.go:583 level=info msg="Received SIGTERM, exiting gracefully..." 
2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local podman[130952]: 2026-03-09 00:24:26.640640544 +0000 UTC m=+0.027284391 container died bcac0140b0f61e75c97dfcae8b262e1d44947399e56734f96288fcd04ed24163 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local podman[130952]: 2026-03-09 00:24:26.651949171 +0000 UTC m=+0.038593017 container remove bcac0140b0f61e75c97dfcae8b262e1d44947399e56734f96288fcd04ed24163 (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local podman[130952]: 2026-03-09 00:24:26.653147202 +0000 UTC m=+0.039791059 volume remove de656a11509b9d5d091140184efa8f0f771ed30a8c2c691c77d0569313c1fc73 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local bash[130952]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-alertmanager-a 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@alertmanager.a.service: Deactivated successfully. 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: Stopped Ceph alertmanager.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:24:26.851 INFO:journalctl@ceph.alertmanager.a.vm04.stdout:Mar 09 00:24:26 vm04.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@alertmanager.a.service: Consumed 1.598s CPU time. 2026-03-09T00:24:47.841 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 --force --keep-logs 2026-03-09T00:24:49.190 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: Stopping Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:24:49.190 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local podman[111316]: 2026-03-09 00:24:49.179958908 +0000 UTC m=+0.019526934 container died d059c0022310421951c62bc3da32e0426d718d58f39e255eaade8d1ff44ff34f (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:24:49.462 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local podman[111316]: 2026-03-09 00:24:49.194878007 +0000 UTC m=+0.034446033 container remove d059c0022310421951c62bc3da32e0426d718d58f39e255eaade8d1ff44ff34f (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T00:24:49.463 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local bash[111316]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-node-exporter-b 2026-03-09T00:24:49.463 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-09T00:24:49.463 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-09T00:24:49.463 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: Stopped Ceph node-exporter.b for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:24:49.463 INFO:journalctl@ceph.node-exporter.b.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@node-exporter.b.service: Consumed 1.743s CPU time. 2026-03-09T00:24:49.712 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: Stopping Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9... 
2026-03-09T00:24:49.987 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=server t=2026-03-09T00:24:49.711370537Z level=info msg="Shutdown started" reason="System signal: terminated" 2026-03-09T00:24:49.987 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=tracing t=2026-03-09T00:24:49.711903306Z level=info msg="Closing tracing" 2026-03-09T00:24:49.987 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=ticker t=2026-03-09T00:24:49.712074105Z level=info msg=stopped last_tick=2026-03-09T00:24:40Z 2026-03-09T00:24:49.987 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a[80516]: logger=grafana-apiserver t=2026-03-09T00:24:49.712193177Z level=info msg="StorageObjectCountTracker pruner is exiting" 2026-03-09T00:24:49.987 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local podman[111451]: 2026-03-09 00:24:49.724359696 +0000 UTC m=+0.030199607 container died aa7f793dcb8e345e7f6e34e7964aa0fac96c4950dac46070e35e16b19fd8c446 (image=quay.io/ceph/grafana:10.4.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, maintainer=Grafana Labs ) 2026-03-09T00:24:49.988 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local podman[111451]: 2026-03-09 00:24:49.765002135 +0000 UTC m=+0.070842046 container remove aa7f793dcb8e345e7f6e34e7964aa0fac96c4950dac46070e35e16b19fd8c446 (image=quay.io/ceph/grafana:10.4.0, name=ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a, maintainer=Grafana Labs ) 2026-03-09T00:24:49.988 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local bash[111451]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9-grafana-a 2026-03-09T00:24:49.988 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service: Deactivated successfully. 2026-03-09T00:24:49.988 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: Stopped Ceph grafana.a for fdcbddf6-1b49-11f1-80b0-7392062373f9. 2026-03-09T00:24:49.988 INFO:journalctl@ceph.grafana.a.vm10.stdout:Mar 09 00:24:49 vm10.local systemd[1]: ceph-fdcbddf6-1b49-11f1-80b0-7392062373f9@grafana.a.service: Consumed 4.137s CPU time. 2026-03-09T00:25:00.661 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:25:00.691 DEBUG:teuthology.orchestra.run.vm10:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T00:25:00.720 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-09T00:25:00.720 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/crash to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307/remote/vm04/crash 2026-03-09T00:25:00.720 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/crash -- . 
2026-03-09T00:25:00.762 INFO:teuthology.orchestra.run.vm04.stderr:tar: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/crash: Cannot open: No such file or directory 2026-03-09T00:25:00.762 INFO:teuthology.orchestra.run.vm04.stderr:tar: Error is not recoverable: exiting now 2026-03-09T00:25:00.763 DEBUG:teuthology.misc:Transferring archived files from vm10:/var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/crash to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307/remote/vm10/crash 2026-03-09T00:25:00.764 DEBUG:teuthology.orchestra.run.vm10:> sudo tar c -f - -C /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/crash -- . 2026-03-09T00:25:00.789 INFO:teuthology.orchestra.run.vm10.stderr:tar: /var/lib/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/crash: Cannot open: No such file or directory 2026-03-09T00:25:00.789 INFO:teuthology.orchestra.run.vm10.stderr:tar: Error is not recoverable: exiting now 2026-03-09T00:25:00.791 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-09T00:25:00.791 DEBUG:teuthology.orchestra.run.vm04:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | egrep -v CEPHADM_AGENT_DOWN | head -n 1 2026-03-09T00:25:00.838 INFO:tasks.cephadm:Compressing logs... 2026-03-09T00:25:00.838 DEBUG:teuthology.orchestra.run.vm04:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T00:25:00.880 DEBUG:teuthology.orchestra.run.vm10:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T00:25:00.905 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T00:25:00.905 INFO:teuthology.orchestra.run.vm10.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T00:25:00.906 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-volume.log 2026-03-09T00:25:00.907 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.b.log 2026-03-09T00:25:00.907 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T00:25:00.907 INFO:teuthology.orchestra.run.vm04.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T00:25:00.907 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.cephadm.log 2026-03-09T00:25:00.909 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.a.log 2026-03-09T00:25:00.909 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log 2026-03-09T00:25:00.914 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.a.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mgr.y.log 2026-03-09T00:25:00.917 
INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log: 92.0% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T00:25:00.918 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.audit.log 2026-03-09T00:25:00.920 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mgr.y.log: 93.7% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log.gz 2026-03-09T00:25:00.921 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.b.log: 94.2% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-volume.log.gz 2026-03-09T00:25:00.921 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.audit.log 2026-03-09T00:25:00.922 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.cephadm.log: 87.4% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.cephadm.log.gz 2026-03-09T00:25:00.924 INFO:teuthology.orchestra.run.vm10.stderr: 91.1% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T00:25:00.924 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.cephadm.log 2026-03-09T00:25:00.927 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log 2026-03-09T00:25:00.929 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.audit.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-volume.log 2026-03-09T00:25:00.931 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.cephadm.log: 92.8% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.cephadm.log.gz 2026-03-09T00:25:00.931 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.audit.log: 91.1% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.audit.log.gz 2026-03-09T00:25:00.931 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mgr.x.log 2026-03-09T00:25:00.933 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.c.log 2026-03-09T00:25:00.934 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log: 88.5% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.log.gz 2026-03-09T00:25:00.934 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.4.log 2026-03-09T00:25:00.935 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mgr.x.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.5.log 2026-03-09T00:25:00.940 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-volume.log: 94.4% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph.audit.log.gz 2026-03-09T00:25:00.941 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.0.log 2026-03-09T00:25:00.941 
INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.6.log 2026-03-09T00:25:00.949 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.7.log 2026-03-09T00:25:00.951 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.c.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.1.log 2026-03-09T00:25:00.953 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-client.rgw.foo.vm10.dwizvi.log 2026-03-09T00:25:00.955 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.2.log 2026-03-09T00:25:00.956 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.1.log: 94.2% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-volume.log.gz 2026-03-09T00:25:00.961 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.3.log 2026-03-09T00:25:00.966 INFO:teuthology.orchestra.run.vm10.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.7.log: /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-client.rgw.foo.vm10.dwizvi.log: 75.4% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-client.rgw.foo.vm10.dwizvi.log.gz 2026-03-09T00:25:00.969 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-client.rgw.foo.vm04.ehrfsf.log 2026-03-09T00:25:00.979 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/tcmu-runner.log 2026-03-09T00:25:00.980 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-client.rgw.foo.vm04.ehrfsf.log: 75.1% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-client.rgw.foo.vm04.ehrfsf.log.gz 2026-03-09T00:25:00.988 INFO:teuthology.orchestra.run.vm04.stderr:/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/tcmu-runner.log: 87.7% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/tcmu-runner.log.gz 2026-03-09T00:25:01.137 INFO:teuthology.orchestra.run.vm10.stderr: 90.2% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mgr.x.log.gz 2026-03-09T00:25:01.546 INFO:teuthology.orchestra.run.vm04.stderr: 89.9% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mgr.y.log.gz 2026-03-09T00:25:01.804 INFO:teuthology.orchestra.run.vm10.stderr: 92.5% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.b.log.gz 2026-03-09T00:25:01.856 INFO:teuthology.orchestra.run.vm04.stderr: 92.5% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.c.log.gz 2026-03-09T00:25:02.982 INFO:teuthology.orchestra.run.vm10.stderr: 93.6% -- replaced with 
/var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.6.log.gz 2026-03-09T00:25:02.985 INFO:teuthology.orchestra.run.vm04.stderr: 93.7% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.2.log.gz 2026-03-09T00:25:03.116 INFO:teuthology.orchestra.run.vm04.stderr: 91.2% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-mon.a.log.gz 2026-03-09T00:25:03.200 INFO:teuthology.orchestra.run.vm10.stderr: 93.7% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.5.log.gz 2026-03-09T00:25:03.249 INFO:teuthology.orchestra.run.vm10.stderr: 94.2% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.7.log.gz 2026-03-09T00:25:03.334 INFO:teuthology.orchestra.run.vm04.stderr: 93.8% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.0.log.gz 2026-03-09T00:25:03.451 INFO:teuthology.orchestra.run.vm10.stderr: 93.9% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.4.log.gz 2026-03-09T00:25:03.453 INFO:teuthology.orchestra.run.vm10.stderr: 2026-03-09T00:25:03.453 INFO:teuthology.orchestra.run.vm10.stderr:real 0m2.558s 2026-03-09T00:25:03.453 INFO:teuthology.orchestra.run.vm10.stderr:user 0m4.460s 2026-03-09T00:25:03.453 INFO:teuthology.orchestra.run.vm10.stderr:sys 0m0.279s 2026-03-09T00:25:03.546 INFO:teuthology.orchestra.run.vm04.stderr: 93.8% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.1.log.gz 2026-03-09T00:25:03.731 INFO:teuthology.orchestra.run.vm04.stderr: 93.8% -- replaced with /var/log/ceph/fdcbddf6-1b49-11f1-80b0-7392062373f9/ceph-osd.3.log.gz 2026-03-09T00:25:03.733 INFO:teuthology.orchestra.run.vm04.stderr: 2026-03-09T00:25:03.733 INFO:teuthology.orchestra.run.vm04.stderr:real 0m2.838s 2026-03-09T00:25:03.733 INFO:teuthology.orchestra.run.vm04.stderr:user 0m5.182s 2026-03-09T00:25:03.733 INFO:teuthology.orchestra.run.vm04.stderr:sys 0m0.286s 2026-03-09T00:25:03.734 INFO:tasks.cephadm:Archiving logs... 2026-03-09T00:25:03.734 DEBUG:teuthology.misc:Transferring archived files from vm04:/var/log/ceph to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307/remote/vm04/log 2026-03-09T00:25:03.734 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T00:25:04.090 DEBUG:teuthology.misc:Transferring archived files from vm10:/var/log/ceph to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307/remote/vm10/log 2026-03-09T00:25:04.090 DEBUG:teuthology.orchestra.run.vm10:> sudo tar c -f - -C /var/log/ceph -- . 2026-03-09T00:25:04.348 INFO:tasks.cephadm:Removing cluster... 2026-03-09T00:25:04.348 DEBUG:teuthology.orchestra.run.vm04:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 --force 2026-03-09T00:25:04.585 DEBUG:teuthology.orchestra.run.vm10:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid fdcbddf6-1b49-11f1-80b0-7392062373f9 --force 2026-03-09T00:25:04.787 INFO:tasks.cephadm:Removing cephadm ... 2026-03-09T00:25:04.787 DEBUG:teuthology.orchestra.run.vm04:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-09T00:25:04.803 DEBUG:teuthology.orchestra.run.vm10:> rm -rf /home/ubuntu/cephtest/cephadm 2026-03-09T00:25:04.820 INFO:tasks.cephadm:Teardown complete 2026-03-09T00:25:04.820 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-03-09T00:25:04.822 INFO:teuthology.task.clock:Checking final clock skew... 
2026-03-09T00:25:04.822 DEBUG:teuthology.orchestra.run.vm04:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T00:25:04.845 DEBUG:teuthology.orchestra.run.vm10:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T00:25:04.860 INFO:teuthology.orchestra.run.vm04.stderr:bash: line 1: ntpq: command not found 2026-03-09T00:25:04.878 INFO:teuthology.orchestra.run.vm10.stderr:bash: line 1: ntpq: command not found 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm10.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm10.stdout:=============================================================================== 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm10.stdout:^+ timegoesbrrr.net 2 7 377 80 +302us[ +302us] +/- 25ms 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm10.stdout:^* stratum2-3.NTP.TechFak.U> 2 8 377 84 -265us[ -247us] +/- 17ms 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm10.stdout:^+ de.relay.mahi.be 3 6 377 19 -2144us[-2144us] +/- 19ms 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm10.stdout:^+ 139-144-71-56.ip.linodeu> 2 6 377 20 +3429us[+3429us] +/- 31ms 2026-03-09T00:25:04.985 INFO:teuthology.orchestra.run.vm04.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T00:25:04.986 INFO:teuthology.orchestra.run.vm04.stdout:=============================================================================== 2026-03-09T00:25:04.986 INFO:teuthology.orchestra.run.vm04.stdout:^+ de.relay.mahi.be 3 6 377 22 -1781us[-1811us] +/- 19ms 2026-03-09T00:25:04.986 INFO:teuthology.orchestra.run.vm04.stdout:^+ 139-144-71-56.ip.linodeu> 2 6 377 15 +3850us[+3850us] +/- 31ms 2026-03-09T00:25:04.986 INFO:teuthology.orchestra.run.vm04.stdout:^+ timegoesbrrr.net 2 6 377 20 +808us[ +778us] +/- 26ms 2026-03-09T00:25:04.986 INFO:teuthology.orchestra.run.vm04.stdout:^* stratum2-3.NTP.TechFak.U> 2 6 377 18 -914us[ -944us] +/- 17ms 2026-03-09T00:25:04.986 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-03-09T00:25:04.989 INFO:teuthology.task.ansible:Skipping ansible cleanup... 2026-03-09T00:25:04.989 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-03-09T00:25:04.992 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-03-09T00:25:04.994 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-03-09T00:25:04.997 INFO:teuthology.task.internal:Duration was 2015.643713 seconds 2026-03-09T00:25:04.997 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-03-09T00:25:05.000 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-03-09T00:25:05.000 DEBUG:teuthology.orchestra.run.vm04:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-09T00:25:05.029 DEBUG:teuthology.orchestra.run.vm10:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-03-09T00:25:05.078 INFO:teuthology.orchestra.run.vm10.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T00:25:05.080 INFO:teuthology.orchestra.run.vm04.stderr:Redirecting to /bin/systemctl restart rsyslog.service 2026-03-09T00:25:05.382 INFO:teuthology.task.internal.syslog:Checking logs for errors... 
2026-03-09T00:25:05.382 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm04.local 2026-03-09T00:25:05.382 DEBUG:teuthology.orchestra.run.vm04:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-09T00:25:05.411 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm10.local 2026-03-09T00:25:05.411 DEBUG:teuthology.orchestra.run.vm10:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-03-09T00:25:05.442 INFO:teuthology.task.internal.syslog:Gathering journactl... 2026-03-09T00:25:05.442 DEBUG:teuthology.orchestra.run.vm04:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T00:25:05.453 DEBUG:teuthology.orchestra.run.vm10:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T00:25:06.090 INFO:teuthology.task.internal.syslog:Compressing syslogs... 
2026-03-09T00:25:06.090 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T00:25:06.092 DEBUG:teuthology.orchestra.run.vm10:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T00:25:06.118 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T00:25:06.118 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T00:25:06.118 INFO:teuthology.orchestra.run.vm04.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T00:25:06.118 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-09T00:25:06.119 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-09T00:25:06.120 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-03-09T00:25:06.121 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-03-09T00:25:06.121 INFO:teuthology.orchestra.run.vm10.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-03-09T00:25:06.121 INFO:teuthology.orchestra.run.vm10.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-03-09T00:25:06.122 INFO:teuthology.orchestra.run.vm10.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-03-09T00:25:06.277 INFO:teuthology.orchestra.run.vm10.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.7% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-09T00:25:06.296 INFO:teuthology.orchestra.run.vm04.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 96.7% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-03-09T00:25:06.298 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-03-09T00:25:06.301 INFO:teuthology.task.internal:Restoring /etc/sudoers... 
2026-03-09T00:25:06.301 DEBUG:teuthology.orchestra.run.vm04:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-09T00:25:06.367 DEBUG:teuthology.orchestra.run.vm10:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers 2026-03-09T00:25:06.397 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump 2026-03-09T00:25:06.400 DEBUG:teuthology.orchestra.run.vm04:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-09T00:25:06.410 DEBUG:teuthology.orchestra.run.vm10:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump 2026-03-09T00:25:06.438 INFO:teuthology.orchestra.run.vm04.stdout:kernel.core_pattern = core 2026-03-09T00:25:06.463 INFO:teuthology.orchestra.run.vm10.stdout:kernel.core_pattern = core 2026-03-09T00:25:06.480 DEBUG:teuthology.orchestra.run.vm04:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-09T00:25:06.511 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:25:06.511 DEBUG:teuthology.orchestra.run.vm10:> test -e /home/ubuntu/cephtest/archive/coredump 2026-03-09T00:25:06.540 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T00:25:06.540 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive 2026-03-09T00:25:06.543 INFO:teuthology.task.internal:Transferring archived files... 2026-03-09T00:25:06.544 DEBUG:teuthology.misc:Transferring archived files from vm04:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307/remote/vm04 2026-03-09T00:25:06.544 DEBUG:teuthology.orchestra.run.vm04:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-09T00:25:06.587 DEBUG:teuthology.misc:Transferring archived files from vm10:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-08_22:22:45-orch:cephadm-squid-none-default-vps/307/remote/vm10 2026-03-09T00:25:06.587 DEBUG:teuthology.orchestra.run.vm10:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- . 2026-03-09T00:25:06.619 INFO:teuthology.task.internal:Removing archive directory... 2026-03-09T00:25:06.619 DEBUG:teuthology.orchestra.run.vm04:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-09T00:25:06.626 DEBUG:teuthology.orchestra.run.vm10:> rm -rf -- /home/ubuntu/cephtest/archive 2026-03-09T00:25:06.677 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload 2026-03-09T00:25:06.680 INFO:teuthology.task.internal:Not uploading archives. 2026-03-09T00:25:06.680 DEBUG:teuthology.run_tasks:Unwinding manager internal.base 2026-03-09T00:25:06.682 INFO:teuthology.task.internal:Tidying up after the test... 
2026-03-09T00:25:06.682 DEBUG:teuthology.orchestra.run.vm04:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-09T00:25:06.685 DEBUG:teuthology.orchestra.run.vm10:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest 2026-03-09T00:25:06.701 INFO:teuthology.orchestra.run.vm04.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 00:25 /home/ubuntu/cephtest 2026-03-09T00:25:06.737 INFO:teuthology.orchestra.run.vm10.stdout: 8532143 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 00:25 /home/ubuntu/cephtest 2026-03-09T00:25:06.738 DEBUG:teuthology.run_tasks:Unwinding manager console_log 2026-03-09T00:25:06.745 INFO:teuthology.run:Summary data: description: orch:cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/defaut 3-upgrade/staggered 4-wait 5-upgrade-ls agent/off mon_election/classic} duration: 2015.643713235855 owner: kyr success: true 2026-03-09T00:25:06.745 DEBUG:teuthology.report:Pushing job info to http://localhost:8080 2026-03-09T00:25:06.774 INFO:teuthology.run:pass